author		Chris Wilson <chris@chris-wilson.co.uk>	2010-12-16 16:02:15 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-12-16 16:02:15 -0500
commit		d8c58fabd75021cdd99abcd96513cb088d41092b (patch)
tree		f6554ecfb27c0d50f5ae6acae3a7077282813cab
parent		9c04f015ebc2cc2cca5a4a576deb82a311578edc (diff)
parent		b08ebe7e776e5be0271ed1e1bbb384e1f29dd117 (diff)
Merge remote branch 'airlied/drm-core-next' into drm-intel-next
114 files changed, 10988 insertions(+), 5145 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bede10a03407..c6b2e27a446a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	if (!crtc->enabled)
 		return true;
 
+	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 
 	}
 
+	/* Store real post-adjustment hardware mode. */
+	crtc->hwmode = *adjusted_mode;
+
+	/* Calculate and store various constants which
+	 * are later needed by vblank and swap-completion
+	 * timestamping. They are derived from true hwmode.
+	 */
+	drm_calc_timestamping_constants(crtc);
+
 	/* XXX free adjustedmode */
 	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
 	if (!ret) {
+		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 722700d5d73e..77c875db5aae 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
 #include <linux/slab.h>
 
 #include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+	((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
 /**
  * Get interrupt from bus id.
  *
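The vblanktimestamp() macro maps a (crtc, count) pair onto one flat array holding DRM_VBLANKTIME_RBSIZE timestamp slots per crtc, with the vblank count selecting a slot modulo the ring size. A standalone sketch of the indexing, with an assumed ring size of 2 for illustration:

```c
#include <stdio.h>

#define RBSIZE 2	/* assumed stand-in for DRM_VBLANKTIME_RBSIZE */

/* Same layout as the macro above: crtc-major, with the count
 * wrapping inside each crtc's window of RBSIZE slots. */
static int slot(int crtc, unsigned int count)
{
	return crtc * RBSIZE + (count % RBSIZE);
}

int main(void)
{
	/* crtc 1 alternates between its two slots as the count advances */
	printf("%d %d %d\n", slot(1, 41), slot(1, 42), slot(1, 43)); /* 3 2 3 */
	return 0;
}
```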
@@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	return 0;
 }
 
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	u32 vblcount;
+	s64 diff_ns;
+	int vblrc;
+	struct timeval tvblank;
+
+	/* Prevent vblank irq processing while disabling vblank irqs,
+	 * so no updates of timestamps or count can happen after we've
+	 * disabled. Needed to prevent races in case of delayed irq's.
+	 * Disable preemption, so vblank_time_lock is held as short as
+	 * possible, even under a kernel with PREEMPT_RT patches.
+	 */
+	preempt_disable();
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	dev->driver->disable_vblank(dev, crtc);
+	dev->vblank_enabled[crtc] = 0;
+
+	/* No further vblank irq's will be processed after
+	 * this point. Get current hardware vblank count and
+	 * vblank timestamp, repeat until they are consistent.
+	 *
+	 * FIXME: There is still a race condition here and in
+	 * drm_update_vblank_count() which can cause off-by-one
+	 * reinitialization of software vblank counter. If gpu
+	 * vblank counter doesn't increment exactly at the leading
+	 * edge of a vblank interval, then we can lose 1 count if
+	 * we happen to execute between start of vblank and the
+	 * delayed gpu counter increment.
+	 */
+	do {
+		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Compute time difference to stored timestamp of last vblank
+	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
+	 */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* If there is at least 1 msec difference between the last stored
+	 * timestamp and tvblank, then we are currently executing our
+	 * disable inside a new vblank interval, the tvblank timestamp
+	 * corresponds to this new vblank interval and the irq handler
+	 * for this vblank didn't run yet and won't run due to our disable.
+	 * Therefore we need to do the job of drm_handle_vblank() and
+	 * increment the vblank counter by one to account for this vblank.
+	 *
+	 * Skip this step if there isn't any high precision timestamp
+	 * available. In that case we can't account for this and just
+	 * hope for the best.
+	 */
+	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+		atomic_inc(&dev->_vblank_count[crtc]);
+
+	/* Invalidate all timestamps while vblank irq's are off. */
+	clear_vblank_timestamps(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	preempt_enable();
+}
+
 static void vblank_disable_fn(unsigned long arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
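The do/while in vblank_disable_and_save() pairs a hardware counter read with a timestamp query and retries until two successive counter reads agree, so both values are known to describe the same vblank interval. A minimal userspace sketch of the pattern, with hypothetical stand-ins for the driver callbacks:

```c
#include <stdint.h>

/* Hypothetical stand-ins for dev->driver->get_vblank_counter() and
 * drm_get_last_vbltimestamp(); for illustration only. */
static uint32_t read_counter(void)   { /* e.g. read a hw register */ return 0; }
static uint64_t read_timestamp(void) { /* e.g. query system time  */ return 0; }

/* Retry until the counter is stable across the timestamp query, so
 * the returned pair describes a single vblank interval. */
static void read_consistent(uint32_t *count, uint64_t *stamp)
{
	do {
		*count = read_counter();
		*stamp = read_timestamp();
	} while (*count != read_counter());
}
```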
@@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg)
 		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
 		    dev->vblank_enabled[i]) {
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
-			dev->last_vblank[i] =
-				dev->driver->get_vblank_counter(dev, i);
-			dev->driver->disable_vblank(dev, i);
-			dev->vblank_enabled[i] = 0;
+			vblank_disable_and_save(dev, i);
 		}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
 	kfree(dev->last_vblank);
 	kfree(dev->last_vblank_wait);
 	kfree(dev->vblank_inmodeset);
+	kfree(dev->_vblank_time);
 
 	dev->num_crtcs = 0;
 }
@@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
 		    (unsigned long)dev);
 	spin_lock_init(&dev->vbl_lock);
+	spin_lock_init(&dev->vblank_time_lock);
+
 	dev->num_crtcs = num_crtcs;
 
 	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	if (!dev->vblank_inmodeset)
 		goto err;
 
+	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+				    sizeof(struct timeval), GFP_KERNEL);
+	if (!dev->_vblank_time)
+		goto err;
+
+	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+	/* Driver specific high-precision vblank timestamping supported? */
+	if (dev->driver->get_vblank_timestamp)
+		DRM_INFO("Driver supports precise vblank timestamp query.\n");
+	else
+		DRM_INFO("No driver support for vblank timestamp query.\n");
+
 	/* Zero per-crtc vblank stuff */
 	for (i = 0; i < num_crtcs; i++) {
 		init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install);
  *
  * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
  */
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
 	int irq_enabled, i;
@@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data,
 {
 	struct drm_control *ctl = data;
 
-	/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+	/* if we haven't irq we fallback for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h
+	 */
 
 
 	switch (ctl->func) {
@@ -360,6 +472,287 @@ int drm_control(struct drm_device *dev, void *data,
 }
 
 /**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	u64 dotclock;
+
+	/* Dot clock in Hz: */
+	dotclock = (u64) crtc->hwmode.clock * 1000;
+
+	/* Valid dotclock? */
+	if (dotclock > 0) {
+		/* Convert scanline length in pixels and video dot clock to
+		 * line duration, frame duration and pixel duration in
+		 * nanoseconds:
+		 */
+		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+					       1000000000), dotclock);
+		framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+	} else
+		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+			  crtc->base.id);
+
+	crtc->pixeldur_ns = pixeldur_ns;
+	crtc->linedur_ns  = linedur_ns;
+	crtc->framedur_ns = framedur_ns;
+
+	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+		  crtc->base.id, crtc->hwmode.crtc_htotal,
+		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+		  (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
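To make the constants concrete, consider an assumed 1920x1080@60 mode with a 148.5 MHz dot clock, crtc_htotal 2200 and crtc_vtotal 1125 (example values, not taken from this patch). The same integer arithmetic, with plain 64-bit division standing in for div64_u64(), yields about 6 ns per pixel, 14814 ns per line and 16.67 ms per frame:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example mode: 1920x1080@60, 148.5 MHz dot clock. */
	uint64_t dotclock = 148500000ULL;	/* Hz */
	uint64_t htotal = 2200, vtotal = 1125;

	uint64_t pixeldur_ns = 1000000000ULL / dotclock;          /* 6 ns */
	uint64_t linedur_ns  = htotal * 1000000000ULL / dotclock; /* 14814 ns */
	uint64_t framedur_ns = vtotal * linedur_ns;               /* 16665750 ns */

	printf("pixel %llu ns, line %llu ns, frame %llu ns\n",
	       (unsigned long long)pixeldur_ns,
	       (unsigned long long)linedur_ns,
	       (unsigned long long)framedur_ns);
	return 0;
}
```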
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ *             On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL   - Invalid crtc.
+ * -EAGAIN   - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO      - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+					  int *max_error,
+					  struct timeval *vblank_time,
+					  unsigned flags,
+					  struct drm_crtc *refcrtc)
+{
+	struct timeval stime, raw_time;
+	struct drm_display_mode *mode;
+	int vbl_status, vtotal, vdisplay;
+	int vpos, hpos, i;
+	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	bool invbl;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Scanout position query not supported? Should not happen. */
+	if (!dev->driver->get_scanout_position) {
+		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+		return -EIO;
+	}
+
+	mode = &refcrtc->hwmode;
+	vtotal = mode->crtc_vtotal;
+	vdisplay = mode->crtc_vdisplay;
+
+	/* Durations of frames, lines, pixels in nanoseconds. */
+	framedur_ns = refcrtc->framedur_ns;
+	linedur_ns  = refcrtc->linedur_ns;
+	pixeldur_ns = refcrtc->pixeldur_ns;
+
+	/* If mode timing undefined, just return as no-op:
+	 * Happens during initial modesetting of a crtc.
+	 */
+	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		return -EAGAIN;
+	}
+
+	/* Don't know yet how to handle interlaced or
+	 * double scan modes. Just no-op for now.
+	 */
+	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+		return -ENOTSUPP;
+	}
+
+	/* Get current scanout position with system timestamp.
+	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+	 * if single query takes longer than max_error nanoseconds.
+	 *
+	 * This guarantees a tight bound on maximum error if
+	 * code gets preempted or delayed for some reason.
+	 */
+	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+		/* Disable preemption to make it very likely to
+		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+
+		/* Get system timestamp before query. */
+		do_gettimeofday(&stime);
+
+		/* Get vertical and horizontal scanout pos. vpos, hpos. */
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+		/* Get system timestamp after query. */
+		do_gettimeofday(&raw_time);
+
+		preempt_enable();
+
+		/* Return as no-op if scanout query unsupported or failed. */
+		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+				  crtc, vbl_status);
+			return -EIO;
+		}
+
+		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+		/* Accept result with < max_error nsecs timing uncertainty. */
+		if (duration_ns <= (s64) *max_error)
+			break;
+	}
+
+	/* Noisy system timing? */
+	if (i == DRM_TIMESTAMP_MAXRETRIES) {
+		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+			  crtc, (int) duration_ns/1000, *max_error/1000, i);
+	}
+
+	/* Return upper bound of timestamp precision error. */
+	*max_error = (int) duration_ns;
+
+	/* Check if in vblank area:
+	 * vpos is >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+	/* Convert scanout position into elapsed time at raw_time query
+	 * since start of scanout at first display scanline. delta_ns
+	 * can be negative if start of scanout hasn't happened yet.
+	 */
+	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+	    ((vdisplay - vpos) < vtotal / 100)) {
+		delta_ns = delta_ns - framedur_ns;
+
+		/* Signal this correction as "applied". */
+		vbl_status |= 0x8;
+	}
+
+	/* Subtract time delta from raw timestamp to get final
+	 * vblank_time timestamp for end of vblank.
+	 */
+	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+		  crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+		  raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+		  (int) duration_ns/1000, i);
+
+	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+	if (invbl)
+		vbl_status |= DRM_VBLANKTIME_INVBL;
+
+	return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
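The heart of the function is the final correction: the scanout position is converted into elapsed time since the start of scanout and subtracted from the query timestamp. A worked sketch with assumed inputs (scanline 100, pixel 640, and the line/pixel durations from the earlier 1080p example); the real function additionally retries the query and handles the early-vblank-irq case:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int vpos = 100, hpos = 640;		/* assumed scanout position */
	int64_t linedur_ns = 14814, pixeldur_ns = 6;
	int64_t raw_time_ns = 1000000000;	/* assumed query time */

	/* Elapsed time since start of scanout; this would be negative
	 * while still inside vblank, where vpos counts down. */
	int64_t delta_ns = (int64_t)vpos * linedur_ns +
			   (int64_t)hpos * pixeldur_ns;

	/* End-of-vblank timestamp = query time minus elapsed time. */
	printf("vblank_time = %lld ns\n",
	       (long long)(raw_time_ns - delta_ns));	/* 998514760 ns */
	return 0;
}
```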
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+			      struct timeval *tvblank, unsigned flags)
+{
+	int ret = 0;
+
+	/* Define requested maximum error on timestamps (nanoseconds). */
+	int max_error = (int) drm_timestamp_precision * 1000;
+
+	/* Query driver if possible and precision timestamping enabled. */
+	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+							tvblank, flags);
+		if (ret > 0)
+			return (u32) ret;
+	}
+
+	/* GPU high precision timestamp query unsupported or failed.
+	 * Return gettimeofday timestamp as best estimate.
+	 */
+	do_gettimeofday(tvblank);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
  * drm_vblank_count - retrieve "cooked" vblank counter value
  * @dev: DRM device
  * @crtc: which counter to retrieve
@@ -375,6 +768,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
 EXPORT_SYMBOL(drm_vblank_count);
 
 /**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns the corresponding system timestamp of the
+ * vblank interval that corresponds to the current vblank counter value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+			      struct timeval *vblanktime)
+{
+	u32 cur_vblank;
+
+	/* Read timestamp from slot of _vblank_time ringbuffer
+	 * that corresponds to current vblank count. Retry if
+	 * count has incremented during readout. This works like
+	 * a seqlock.
+	 */
+	do {
+		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		smp_rmb();
+	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+	return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
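The loop above is the reader half of a seqlock-style scheme: fetch the slot for the current count, then re-check the count and retry if a vblank irq advanced it during the readout. A userspace sketch of the same pattern using C11 atomics (not kernel code; the ring size of 2 is an assumed stand-in for DRM_VBLANKTIME_RBSIZE):

```c
#include <stdatomic.h>
#include <stdint.h>

#define RBSIZE 2

struct vbl { atomic_uint count; uint64_t stamp[RBSIZE]; };

/* Retry until the count is unchanged across the slot read, so the
 * returned (count, timestamp) pair is consistent. */
static uint64_t read_count_and_time(struct vbl *v, unsigned int *count)
{
	unsigned int c;
	uint64_t t;

	do {
		c = atomic_load(&v->count);
		t = v->stamp[c % RBSIZE];
		atomic_thread_fence(memory_order_acquire); /* like smp_rmb() */
	} while (c != atomic_load(&v->count));

	*count = c;
	return t;
}
```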
+
+/**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
  * @crtc: counter to update
@@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count);
  */
 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 {
-	u32 cur_vblank, diff;
+	u32 cur_vblank, diff, tslot, rc;
+	struct timeval t_vblank;
 
 	/*
 	 * Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	 * NOTE! It's possible we lost a full dev->max_vblank_count events
 	 * here if the register is small or we had vblank interrupts off for
 	 * a long time.
+	 *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This to prevent races between gpu
+	 * updating its hardware counter while we are retrieving the
+	 * corresponding vblank timestamp.
 	 */
-	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	do {
+		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Deal with counter wrap */
 	diff = cur_vblank - dev->last_vblank[crtc];
 	if (cur_vblank < dev->last_vblank[crtc]) {
 		diff += dev->max_vblank_count;
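The wrap handling above folds a hardware counter overflow back into the difference. A standalone sketch of the same arithmetic, assuming a hardware counter that wraps at max_vblank_count (0xffffff, i.e. 24 bits, is an assumed example value):

```c
#include <stdint.h>

/* Mirror of the wrap handling in drm_update_vblank_count():
 * cur < last means the hardware counter wrapped since the last
 * readout, so fold the wrap back into the difference. */
static uint32_t cooked_diff(uint32_t cur, uint32_t last,
			    uint32_t max_vblank_count)
{
	uint32_t diff = cur - last;

	if (cur < last)
		diff += max_vblank_count;

	return diff;
}
```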
@@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
 		  crtc, diff);
 
+	/* Reinitialize corresponding vblank timestamp if high-precision query
+	 * available. Skip this step if query unsupported or failed. Will
+	 * reinitialize delayed at next vblank interrupt in that case.
+	 */
+	if (rc) {
+		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		vblanktimestamp(dev, crtc, tslot) = t_vblank;
+		smp_wmb();
+	}
+
 	atomic_add(diff, &dev->_vblank_count[crtc]);
 }
 
@@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
  */
 int drm_vblank_get(struct drm_device *dev, int crtc)
 {
-	unsigned long irqflags;
+	unsigned long irqflags, irqflags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		/* Disable preemption while holding vblank_time_lock. Do
+		 * it explicitly to guard against PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
 		if (!dev->vblank_enabled[crtc]) {
+			/* Enable vblank irqs under vblank_time_lock protection.
+			 * All vblank count & timestamp updates are held off
+			 * until we are done reinitializing master counter and
+			 * timestamps. Filtercode in drm_handle_vblank() will
+			 * prevent double-accounting of same vblank interval.
+			 */
 			ret = dev->driver->enable_vblank(dev, crtc);
-			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+				  crtc, ret);
 			if (ret)
 				atomic_dec(&dev->vblank_refcount[crtc]);
 			else {
@@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 				drm_update_vblank_count(dev, crtc);
 			}
 		}
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+		preempt_enable();
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get);
  * @crtc: which counter to give up
  *
  * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
 
 	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
-		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	    (drm_vblank_offdelay > 0))
+		mod_timer(&dev->vblank_disable_timer,
+			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
@@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	dev->driver->disable_vblank(dev, crtc);
+	vblank_disable_and_save(dev, crtc);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
-	dev->vblank_enabled[crtc] = 0;
-	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	e->base.file_priv = file_priv;
 	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
-	do_gettimeofday(&now);
 	spin_lock_irqsave(&dev->event_lock, flags);
 
 	if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	}
 
 	file_priv->event_space -= sizeof e->event;
-	seq = drm_vblank_count(dev, pipe);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
+
 	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1 << 23)) {
 		vblwait->request.sequence = seq + 1;
@@ -727,11 +1189,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	if (ret != -EINTR) {
 		struct timeval now;
 
-		do_gettimeofday(&now);
-
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
-		vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
 		DRM_DEBUG("returning %d to client\n",
 			  vblwait->reply.sequence);
 	} else {
@@ -750,8 +1211,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 	unsigned long flags;
 	unsigned int seq;
 
-	do_gettimeofday(&now);
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count_and_time(dev, crtc, &now);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -789,11 +1249,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
+	u32 vblcount;
+	s64 diff_ns;
+	struct timeval tvblank;
+	unsigned long irqflags;
+
 	if (!dev->num_crtcs)
 		return;
 
-	atomic_inc(&dev->_vblank_count[crtc]);
+	/* Need timestamp lock to prevent concurrent execution with
+	 * vblank enable/disable, as this would cause inconsistent
+	 * or corrupted timestamps and vblank counts.
+	 */
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	/* Vblank irq handling disabled. Nothing to do. */
+	if (!dev->vblank_enabled[crtc]) {
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+		return;
+	}
+
+	/* Fetch corresponding timestamp for this vblank interval from
+	 * driver and store it in proper slot of timestamp ringbuffer.
+	 */
+
+	/* Get current timestamp and count. */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+	/* Compute time difference to timestamp of last vblank */
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* Update vblank timestamp and count if at least
+	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+	 * difference between last stored timestamp and current
+	 * timestamp. A smaller difference means basically
+	 * identical timestamps. Happens if this vblank has
+	 * been already processed and this is a redundant call,
+	 * e.g., due to spurious vblank interrupts. We need to
+	 * ignore those for accounting.
+	 */
+	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+		/* Store new timestamp in ringbuffer. */
+		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+		smp_wmb();
+
+		/* Increment cooked vblank count. This also atomically commits
+		 * the timestamp computed above.
+		 */
+		atomic_inc(&dev->_vblank_count[crtc]);
+	} else {
+		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+			  crtc, (int) diff_ns);
+	}
+
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
 	drm_handle_vblank_events(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
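drm_handle_vblank() is the writer half of the seqlock-like scheme paired with drm_vblank_count_and_time(): the new timestamp is published into the *next* ring slot first, and only then does the counter increment make it visible. A userspace sketch of that ordering, with C11 atomics standing in for smp_wmb() and the kernel atomics:

```c
#include <stdatomic.h>
#include <stdint.h>

#define RBSIZE 2	/* assumed DRM_VBLANKTIME_RBSIZE */

struct vbl { atomic_uint count; uint64_t stamp[RBSIZE]; };

/* Publish timestamp into the slot for count + 1, fence, then bump the
 * count; readers retrying on the count never see a half-updated pair. */
static void publish_vblank(struct vbl *v, uint64_t now_ns)
{
	unsigned int c = atomic_load(&v->count);

	v->stamp[(c + 1) % RBSIZE] = now_ns;
	atomic_thread_fence(memory_order_release);	/* like smp_wmb() */
	atomic_fetch_add(&v->count, 1);
}
```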
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee042cc..d59edc18301f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 
 module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 
 struct idr drm_minors_idr;
 
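Both knobs are runtime-tunable: with the 0600 mode used above they appear root-writable under /sys/module/drm/parameters/, and they can also be set at boot as drm.vblankoffdelay= and drm.timestamp_precision_usec= on the kernel command line. Note the interaction with the drm_vblank_put() change earlier in this diff: a vblankoffdelay of 0 never arms the disable timer, so vblank irqs stay enabled once requested, while timestamp_precision_usec=0 makes drm_get_last_vbltimestamp() skip the driver's high-precision query and fall back to do_gettimeofday().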
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d667d6..b1d8941e04d8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,12 +5,13 @@
 ccflags-y := -Iinclude/drm
 nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
              nouveau_dp.o nouveau_ramht.o \
              nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+             nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -18,14 +19,16 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o \
+             nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
              nv50_calc.o \
-             nv04_pm.o nv50_pm.o nva3_pm.o
+             nv04_pm.o nv50_pm.o nva3_pm.o \
+             nv50_vram.o nv50_vm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b2293576f278..d3046559bf05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
 	return entry;
 }
 
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
 {
 	struct dcb_entry *entry = new_dcb_entry(dcb);
 
-	entry->type = 0;
+	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	entry->location = DCB_LOC_ON_CHIP;
-	entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 2;
-	entry->i2c_index = LEGACY_I2C_PANEL;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
-	entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
-	entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
-	/*
-	 * For dvi-a either crtc probably works, but my card appears to only
-	 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
-	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
-	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
-	 * the monitor picks up the mode res ok and lights up, but no pixel
-	 * data appears, so the board manufacturer probably connected up the
-	 * sync lines, but missed the video traces / components
-	 *
-	 * with this introduction, dvi-a left as an exercise for the reader.
-	 */
-	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 1;
-	entry->i2c_index = LEGACY_I2C_TV;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	if (type != OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	entry->or = or;
 }
 
 static bool
@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	return true;
 }
 
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+				     all_heads, 1);
+}
+
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 
 	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
-		NV_INFO(dev, "Assuming a CRT output exists\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 	 */
 	NV_TRACEWARN(dev, "No useful information in BIOS output table; "
 			  "adding all possible outputs\n");
-	fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-	/*
-	 * Attempt to detect TV before DVI because the test
-	 * for the former is more accurate and it rules the
-	 * latter out.
-	 */
-	if (nv04_tv_identify(dev,
-			     bios->legacy.i2c_indices.tv) >= 0)
-		fabricate_tv_output(dcb, twoHeads);
-
-	else if (bios->tmds.output0_script_ptr ||
-		 bios->tmds.output1_script_ptr)
-		fabricate_dvi_i_output(dcb, twoHeads);
-
+	fabricate_dcb_encoder_table(dev, bios);
 	return 0;
 }
 
@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c200ef5..42d1ad62b381 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nouveau_vm_put(&nvbo->vma);
 	kfree(nvbo);
 }
 
 static void
-nouveau_bo_fixup_align(struct drm_device *dev,
-		       uint32_t tile_mode, uint32_t tile_flags,
-		       int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+		       int *page_shift)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Align the size to the
-	 * appropriate boundaries. This does imply that sizes are rounded up
-	 * 3-7 pages, so be aware of this and do not waste memory by allocating
-	 * many small buffers.
-	 */
-	if (dev_priv->card_type == NV_50) {
-		uint32_t block_size = dev_priv->vram_size >> 15;
-		int i;
-
-		switch (tile_flags) {
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7a00:
-			if (is_power_of_2(block_size)) {
-				for (i = 1; i < 10; i++) {
-					*align = 12 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			} else {
-				for (i = 1; i < 10; i++) {
-					*align = 8 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			}
-			*size = roundup(*size, *align);
-			break;
-		default:
-			break;
-		}
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
-	} else {
-		if (tile_mode) {
+	if (dev_priv->card_type < NV_50) {
+		if (nvbo->tile_mode) {
 			if (dev_priv->chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup(*size, 32 * tile_mode);
+				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
 		}
+	} else {
+		if (likely(dev_priv->chan_vm)) {
+			if (*size > 256 * 1024)
+				*page_shift = dev_priv->chan_vm->lpg_shift;
+			else
+				*page_shift = dev_priv->chan_vm->spg_shift;
+		} else {
+			*page_shift = 12;
+		}
+
+		*size = roundup(*size, (1 << *page_shift));
+		*align = max((1 << *page_shift), *align);
 	}
 
-	/* ALIGN works only on powers of two. */
 	*size = roundup(*size, PAGE_SIZE);
-
-	if (dev_priv->card_type == NV_50) {
-		*size = roundup(*size, 65536);
-		*align = max(65536, *align);
-	}
 }
 
 int
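On NV50 the new code picks a per-buffer page shift and rounds both size and alignment to it, with buffers over 256 KiB taking the VM's large-page shift. A standalone sketch of that branch, with assumed shift values (16 for lpg_shift, 12 for spg_shift; the real values come from the channel VM):

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of the NV50 path above, with assumed page shifts. */
static void fixup_nv50(uint32_t *size, uint32_t *align, int *page_shift)
{
	*page_shift = (*size > 256 * 1024) ? 16 : 12;

	/* Round size up to whole pages; alignment must be at least
	 * one page of the chosen size. */
	*size = (*size + (1u << *page_shift) - 1) & ~((1u << *page_shift) - 1);
	if (*align < (1u << *page_shift))
		*align = 1u << *page_shift;
}

int main(void)
{
	uint32_t size = 300 * 1024, align = 0;
	int shift;

	fixup_nv50(&size, &align, &shift);
	/* prints: size 327680, align 65536, shift 16 */
	printf("size %u, align %u, shift %d\n",
	       (unsigned)size, (unsigned)align, shift);
	return 0;
}
```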
@@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0;
+	int ret = 0, page_shift = 0;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
@@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
-			       &align, &size);
+	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
+	if (!nvbo->no_vm && dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+				     NV_MEM_ACCESS_RW, &nvbo->vma);
+		if (ret) {
+			kfree(nvbo);
+			return ret;
+		}
+	}
+
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
161 | } | 140 | } |
162 | nvbo->channel = NULL; | 141 | nvbo->channel = NULL; |
163 | 142 | ||
143 | if (nvbo->vma.node) { | ||
144 | if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) | ||
145 | nvbo->bo.offset = nvbo->vma.offset; | ||
146 | } | ||
147 | |||
164 | *pnvbo = nvbo; | 148 | *pnvbo = nvbo; |
165 | return 0; | 149 | return 0; |
166 | } | 150 | } |
@@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) | |||
244 | 228 | ||
245 | nouveau_bo_placement_set(nvbo, memtype, 0); | 229 | nouveau_bo_placement_set(nvbo, memtype, 0); |
246 | 230 | ||
247 | ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); | 231 | ret = nouveau_bo_validate(nvbo, false, false, false); |
248 | if (ret == 0) { | 232 | if (ret == 0) { |
249 | switch (bo->mem.mem_type) { | 233 | switch (bo->mem.mem_type) { |
250 | case TTM_PL_VRAM: | 234 | case TTM_PL_VRAM: |
@@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
280 | 264 | ||
281 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); | 265 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
282 | 266 | ||
283 | ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); | 267 | ret = nouveau_bo_validate(nvbo, false, false, false); |
284 | if (ret == 0) { | 268 | if (ret == 0) { |
285 | switch (bo->mem.mem_type) { | 269 | switch (bo->mem.mem_type) { |
286 | case TTM_PL_VRAM: | 270 | case TTM_PL_VRAM: |
@@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) | |||
319 | ttm_bo_kunmap(&nvbo->kmap); | 303 | ttm_bo_kunmap(&nvbo->kmap); |
320 | } | 304 | } |
321 | 305 | ||
306 | int | ||
307 | nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, | ||
308 | bool no_wait_reserve, bool no_wait_gpu) | ||
309 | { | ||
310 | int ret; | ||
311 | |||
312 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, | ||
313 | no_wait_reserve, no_wait_gpu); | ||
314 | if (ret) | ||
315 | return ret; | ||
316 | |||
317 | if (nvbo->vma.node) { | ||
318 | if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) | ||
319 | nvbo->bo.offset = nvbo->vma.offset; | ||
320 | } | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | |||
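nouveau_bo_validate() wraps ttm_bo_validate() so that every placement change also refreshes the cached GPU offset from the buffer's VMA when the buffer lands in VRAM; pin, unpin and the fault path all funnel through it below. A simplified model of that wrapper pattern, with stand-in types rather than the real TTM structures:

    #include <stdbool.h>
    #include <stdint.h>

    enum placement { PL_SYSTEM, PL_VRAM };

    struct bo {
        enum placement mem_type;
        uint64_t offset;       /* cached GPU address */
        uint64_t vma_offset;   /* address reserved in the channel VM */
        bool     vma_valid;
    };

    /* stand-in for ttm_bo_validate(): pretend the move always succeeds */
    static int backend_validate(struct bo *bo, enum placement pl)
    {
        bo->mem_type = pl;
        return 0;
    }

    static int bo_validate(struct bo *bo, enum placement pl)
    {
        int ret = backend_validate(bo, pl);
        if (ret)
            return ret;
        /* as in the diff: only VRAM placements take the VMA offset */
        if (bo->vma_valid && bo->mem_type == PL_VRAM)
            bo->offset = bo->vma_offset;
        return 0;
    }

    int main(void)
    {
        struct bo bo = { PL_SYSTEM, 0, 0x10000, true };

        bo_validate(&bo, PL_VRAM);
        return bo.offset == 0x10000 ? 0 : 1;
    }

Centralizing the refresh means no caller can move a buffer and forget to update the offset.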
322 | u16 | 325 | u16 |
323 | nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) | 326 | nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) |
324 | { | 327 | { |
@@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
410 | man->default_caching = TTM_PL_FLAG_CACHED; | 413 | man->default_caching = TTM_PL_FLAG_CACHED; |
411 | break; | 414 | break; |
412 | case TTM_PL_VRAM: | 415 | case TTM_PL_VRAM: |
413 | man->func = &ttm_bo_manager_func; | 416 | if (dev_priv->card_type == NV_50) { |
417 | man->func = &nouveau_vram_manager; | ||
418 | man->io_reserve_fastpath = false; | ||
419 | man->use_io_reserve_lru = true; | ||
420 | } else { | ||
421 | man->func = &ttm_bo_manager_func; | ||
422 | } | ||
414 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 423 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
415 | TTM_MEMTYPE_FLAG_MAPPABLE; | 424 | TTM_MEMTYPE_FLAG_MAPPABLE; |
416 | man->available_caching = TTM_PL_FLAG_UNCACHED | | 425 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
417 | TTM_PL_FLAG_WC; | 426 | TTM_PL_FLAG_WC; |
418 | man->default_caching = TTM_PL_FLAG_WC; | 427 | man->default_caching = TTM_PL_FLAG_WC; |
419 | if (dev_priv->card_type == NV_50) | ||
420 | man->gpu_offset = 0x40000000; | ||
421 | else | ||
422 | man->gpu_offset = 0; | ||
423 | break; | 428 | break; |
424 | case TTM_PL_TT: | 429 | case TTM_PL_TT: |
425 | man->func = &ttm_bo_manager_func; | 430 | man->func = &ttm_bo_manager_func; |
426 | switch (dev_priv->gart_info.type) { | 431 | switch (dev_priv->gart_info.type) { |
427 | case NOUVEAU_GART_AGP: | 432 | case NOUVEAU_GART_AGP: |
428 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | 433 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
429 | man->available_caching = TTM_PL_FLAG_UNCACHED; | 434 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
430 | man->default_caching = TTM_PL_FLAG_UNCACHED; | 435 | TTM_PL_FLAG_WC; |
436 | man->default_caching = TTM_PL_FLAG_WC; | ||
431 | break; | 437 | break; |
432 | case NOUVEAU_GART_SGDMA: | 438 | case NOUVEAU_GART_SGDMA: |
433 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | | 439 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | |
434 | TTM_MEMTYPE_FLAG_CMA; | 440 | TTM_MEMTYPE_FLAG_CMA; |
435 | man->available_caching = TTM_PL_MASK_CACHING; | 441 | man->available_caching = TTM_PL_MASK_CACHING; |
436 | man->default_caching = TTM_PL_FLAG_CACHED; | 442 | man->default_caching = TTM_PL_FLAG_CACHED; |
443 | man->gpu_offset = dev_priv->gart_info.aper_base; | ||
437 | break; | 444 | break; |
438 | default: | 445 | default: |
439 | NV_ERROR(dev, "Unknown GART type: %d\n", | 446 | NV_ERROR(dev, "Unknown GART type: %d\n", |
440 | dev_priv->gart_info.type); | 447 | dev_priv->gart_info.type); |
441 | return -EINVAL; | 448 | return -EINVAL; |
442 | } | 449 | } |
443 | man->gpu_offset = dev_priv->vm_gart_base; | ||
444 | break; | 450 | break; |
445 | default: | 451 | default: |
446 | NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); | 452 | NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); |
@@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, | |||
485 | if (ret) | 491 | if (ret) |
486 | return ret; | 492 | return ret; |
487 | 493 | ||
488 | if (nvbo->channel) { | ||
489 | ret = nouveau_fence_sync(fence, nvbo->channel); | ||
490 | if (ret) | ||
491 | goto out; | ||
492 | } | ||
493 | |||
494 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, | 494 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, |
495 | no_wait_reserve, no_wait_gpu, new_mem); | 495 | no_wait_reserve, no_wait_gpu, new_mem); |
496 | out: | 496 | nouveau_fence_unref(&fence); |
497 | nouveau_fence_unref((void *)&fence); | ||
498 | return ret; | 497 | return ret; |
499 | } | 498 | } |
500 | 499 | ||
@@ -529,14 +528,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
529 | dst_offset = new_mem->start << PAGE_SHIFT; | 528 | dst_offset = new_mem->start << PAGE_SHIFT; |
530 | if (!nvbo->no_vm) { | 529 | if (!nvbo->no_vm) { |
531 | if (old_mem->mem_type == TTM_PL_VRAM) | 530 | if (old_mem->mem_type == TTM_PL_VRAM) |
532 | src_offset += dev_priv->vm_vram_base; | 531 | src_offset = nvbo->vma.offset; |
533 | else | 532 | else |
534 | src_offset += dev_priv->vm_gart_base; | 533 | src_offset += dev_priv->gart_info.aper_base; |
535 | 534 | ||
536 | if (new_mem->mem_type == TTM_PL_VRAM) | 535 | if (new_mem->mem_type == TTM_PL_VRAM) |
537 | dst_offset += dev_priv->vm_vram_base; | 536 | dst_offset = nvbo->vma.offset; |
538 | else | 537 | else |
539 | dst_offset += dev_priv->vm_gart_base; | 538 | dst_offset += dev_priv->gart_info.aper_base; |
540 | } | 539 | } |
541 | 540 | ||
542 | ret = RING_SPACE(chan, 3); | 541 | ret = RING_SPACE(chan, 3); |
@@ -683,17 +682,24 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
683 | int ret; | 682 | int ret; |
684 | 683 | ||
685 | chan = nvbo->channel; | 684 | chan = nvbo->channel; |
686 | if (!chan || nvbo->no_vm) | 685 | if (!chan || nvbo->no_vm) { |
687 | chan = dev_priv->channel; | 686 | chan = dev_priv->channel; |
687 | mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); | ||
688 | } | ||
688 | 689 | ||
689 | if (dev_priv->card_type < NV_50) | 690 | if (dev_priv->card_type < NV_50) |
690 | ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); | 691 | ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); |
691 | else | 692 | else |
692 | ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); | 693 | ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); |
693 | if (ret) | 694 | if (ret == 0) { |
694 | return ret; | 695 | ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, |
696 | no_wait_reserve, | ||
697 | no_wait_gpu, new_mem); | ||
698 | } | ||
695 | 699 | ||
696 | return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); | 700 | if (chan == dev_priv->channel) |
701 | mutex_unlock(&chan->mutex); | ||
702 | return ret; | ||
697 | } | 703 | } |
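The move path above borrows the device's kernel channel whenever a buffer has no channel of its own (or no VM), taking that channel's mutex around the copy; mutex_lock_nested() with NOUVEAU_KCHANNEL_MUTEX is a lockdep annotation with no user-space equivalent, and the unlock is keyed on pointer equality with dev_priv->channel. A user-space sketch of the borrow-and-lock shape, using an explicit 'locked' flag in place of that pointer comparison:

    #include <pthread.h>
    #include <stddef.h>

    struct channel {
        pthread_mutex_t mutex;
    };

    static struct channel kernel_channel = { PTHREAD_MUTEX_INITIALIZER };

    static int do_move(struct channel *chan) { (void)chan; return 0; }

    static int move_buffer(struct channel *callers_chan)
    {
        struct channel *chan = callers_chan;
        int locked = 0, ret;

        if (!chan) {
            chan = &kernel_channel;          /* borrow the shared channel */
            pthread_mutex_lock(&chan->mutex);
            locked = 1;
        }

        ret = do_move(chan);

        if (locked)                          /* drop only what we took */
            pthread_mutex_unlock(&chan->mutex);
        return ret;
    }

    int main(void)
    {
        return move_buffer(NULL);
    }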
698 | 704 | ||
699 | static int | 705 | static int |
@@ -771,7 +777,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, | |||
771 | struct drm_device *dev = dev_priv->dev; | 777 | struct drm_device *dev = dev_priv->dev; |
772 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 778 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
773 | uint64_t offset; | 779 | uint64_t offset; |
774 | int ret; | ||
775 | 780 | ||
776 | if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { | 781 | if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { |
777 | /* Nothing to do. */ | 782 | /* Nothing to do. */ |
@@ -781,18 +786,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, | |||
781 | 786 | ||
782 | offset = new_mem->start << PAGE_SHIFT; | 787 | offset = new_mem->start << PAGE_SHIFT; |
783 | 788 | ||
784 | if (dev_priv->card_type == NV_50) { | 789 | if (dev_priv->chan_vm) { |
785 | ret = nv50_mem_vm_bind_linear(dev, | 790 | nouveau_vm_map(&nvbo->vma, new_mem->mm_node); |
786 | offset + dev_priv->vm_vram_base, | ||
787 | new_mem->size, | ||
788 | nouveau_bo_tile_layout(nvbo), | ||
789 | offset); | ||
790 | if (ret) | ||
791 | return ret; | ||
792 | |||
793 | } else if (dev_priv->card_type >= NV_10) { | 791 | } else if (dev_priv->card_type >= NV_10) { |
794 | *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, | 792 | *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, |
795 | nvbo->tile_mode); | 793 | nvbo->tile_mode, |
794 | nvbo->tile_flags); | ||
796 | } | 795 | } |
797 | 796 | ||
798 | return 0; | 797 | return 0; |
@@ -808,9 +807,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, | |||
808 | 807 | ||
809 | if (dev_priv->card_type >= NV_10 && | 808 | if (dev_priv->card_type >= NV_10 && |
810 | dev_priv->card_type < NV_50) { | 809 | dev_priv->card_type < NV_50) { |
811 | if (*old_tile) | 810 | nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj); |
812 | nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj); | ||
813 | |||
814 | *old_tile = new_tile; | 811 | *old_tile = new_tile; |
815 | } | 812 | } |
816 | } | 813 | } |
@@ -879,6 +876,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
879 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 876 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
880 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); | 877 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); |
881 | struct drm_device *dev = dev_priv->dev; | 878 | struct drm_device *dev = dev_priv->dev; |
879 | int ret; | ||
882 | 880 | ||
883 | mem->bus.addr = NULL; | 881 | mem->bus.addr = NULL; |
884 | mem->bus.offset = 0; | 882 | mem->bus.offset = 0; |
@@ -901,9 +899,32 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
901 | #endif | 899 | #endif |
902 | break; | 900 | break; |
903 | case TTM_PL_VRAM: | 901 | case TTM_PL_VRAM: |
904 | mem->bus.offset = mem->start << PAGE_SHIFT; | 902 | { |
903 | struct nouveau_vram *vram = mem->mm_node; | ||
904 | |||
905 | if (!dev_priv->bar1_vm) { | ||
906 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
907 | mem->bus.base = pci_resource_start(dev->pdev, 1); | ||
908 | mem->bus.is_iomem = true; | ||
909 | break; | ||
910 | } | ||
911 | |||
912 | ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12, | ||
913 | NV_MEM_ACCESS_RW, &vram->bar_vma); | ||
914 | if (ret) | ||
915 | return ret; | ||
916 | |||
917 | nouveau_vm_map(&vram->bar_vma, vram); | ||
922 | |||
923 | mem->bus.offset = vram->bar_vma.offset; | ||
924 | mem->bus.offset -= 0x0020000000ULL; | ||
905 | mem->bus.base = pci_resource_start(dev->pdev, 1); | 925 | mem->bus.base = pci_resource_start(dev->pdev, 1); |
906 | mem->bus.is_iomem = true; | 926 | mem->bus.is_iomem = true; |
927 | } | ||
907 | break; | 928 | break; |
908 | default: | 929 | default: |
909 | return -EINVAL; | 930 | return -EINVAL; |
@@ -914,6 +935,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
914 | static void | 935 | static void |
915 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 936 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
916 | { | 937 | { |
938 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); | ||
939 | struct nouveau_vram *vram = mem->mm_node; | ||
940 | |||
941 | if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM) | ||
942 | return; | ||
943 | |||
944 | if (!vram->bar_vma.node) | ||
945 | return; | ||
946 | |||
947 | nouveau_vm_unmap(&vram->bar_vma); | ||
948 | nouveau_vm_put(&vram->bar_vma); | ||
917 | } | 949 | } |
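io_mem_reserve() now carves a window out of the BAR1 VM for VRAM buffers and maps the allocation into it; io_mem_free() is the matching release and returns early when nothing was mapped (the bar_vma.node test), so the pair stays safe to call unconditionally. A toy model of that acquire/release pairing, with an invented window allocator standing in for nouveau_vm_get()/nouveau_vm_put():

    #include <stdbool.h>
    #include <stdint.h>

    struct vma {
        bool     held;
        uint64_t offset;
    };

    static int window_get(struct vma *vma)
    {
        vma->offset = 0x20000000ULL;   /* pretend BAR1 window address */
        vma->held = true;
        return 0;
    }

    static void window_put(struct vma *vma)
    {
        vma->held = false;
    }

    static int io_reserve(struct vma *vma, uint64_t *bus_offset)
    {
        int ret = window_get(vma);
        if (ret)
            return ret;
        *bus_offset = vma->offset;
        return 0;
    }

    static void io_free(struct vma *vma)
    {
        if (!vma->held)    /* mirrors the bar_vma.node early-out */
            return;
        window_put(vma);
    }

    int main(void)
    {
        struct vma vma = { false, 0 };
        uint64_t off;

        io_reserve(&vma, &off);
        io_free(&vma);
        io_free(&vma);     /* second free is a safe no-op */
        return 0;
    }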
918 | 950 | ||
919 | static int | 951 | static int |
@@ -939,7 +971,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
939 | nvbo->placement.fpfn = 0; | 971 | nvbo->placement.fpfn = 0; |
940 | nvbo->placement.lpfn = dev_priv->fb_mappable_pages; | 972 | nvbo->placement.lpfn = dev_priv->fb_mappable_pages; |
941 | nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); | 973 | nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); |
942 | return ttm_bo_validate(bo, &nvbo->placement, false, true, false); | 974 | return nouveau_bo_validate(nvbo, false, true, false); |
975 | } | ||
976 | |||
977 | void | ||
978 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) | ||
979 | { | ||
980 | struct nouveau_fence *old_fence; | ||
981 | |||
982 | if (likely(fence)) | ||
983 | nouveau_fence_ref(fence); | ||
984 | |||
985 | spin_lock(&nvbo->bo.bdev->fence_lock); | ||
986 | old_fence = nvbo->bo.sync_obj; | ||
987 | nvbo->bo.sync_obj = fence; | ||
988 | spin_unlock(&nvbo->bo.bdev->fence_lock); | ||
989 | |||
990 | nouveau_fence_unref(&old_fence); | ||
943 | } | 991 | } |
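nouveau_bo_fence() is the classic reference swap: take a reference on the incoming fence, exchange the pointer under bdev->fence_lock, and drop the old reference only after the lock is released, keeping the critical section minimal. A compact sketch of the same pattern, with a pthread mutex standing in for the spinlock and a toy refcount (free-at-zero elided):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct fence {
        atomic_int refs;
    };

    static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;

    static void fence_ref(struct fence *f)   { if (f) f->refs++; }
    static void fence_unref(struct fence *f) { if (f) f->refs--; }

    static void bo_fence(struct fence **slot, struct fence *fence)
    {
        struct fence *old;

        fence_ref(fence);                 /* our reference, taken first */

        pthread_mutex_lock(&fence_lock);
        old = *slot;
        *slot = fence;                    /* publish under the lock */
        pthread_mutex_unlock(&fence_lock);

        fence_unref(old);                 /* old ref dropped after unlock */
    }

    int main(void)
    {
        struct fence a = { 1 };
        struct fence *slot = NULL;

        bo_fence(&slot, &a);
        bo_fence(&slot, NULL);
        return 0;
    }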
944 | 992 | ||
945 | struct ttm_bo_driver nouveau_bo_driver = { | 993 | struct ttm_bo_driver nouveau_bo_driver = { |
@@ -949,11 +997,11 @@ struct ttm_bo_driver nouveau_bo_driver = { | |||
949 | .evict_flags = nouveau_bo_evict_flags, | 997 | .evict_flags = nouveau_bo_evict_flags, |
950 | .move = nouveau_bo_move, | 998 | .move = nouveau_bo_move, |
951 | .verify_access = nouveau_bo_verify_access, | 999 | .verify_access = nouveau_bo_verify_access, |
952 | .sync_obj_signaled = nouveau_fence_signalled, | 1000 | .sync_obj_signaled = __nouveau_fence_signalled, |
953 | .sync_obj_wait = nouveau_fence_wait, | 1001 | .sync_obj_wait = __nouveau_fence_wait, |
954 | .sync_obj_flush = nouveau_fence_flush, | 1002 | .sync_obj_flush = __nouveau_fence_flush, |
955 | .sync_obj_unref = nouveau_fence_unref, | 1003 | .sync_obj_unref = __nouveau_fence_unref, |
956 | .sync_obj_ref = nouveau_fence_ref, | 1004 | .sync_obj_ref = __nouveau_fence_ref, |
957 | .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, | 1005 | .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, |
958 | .io_mem_reserve = &nouveau_ttm_io_mem_reserve, | 1006 | .io_mem_reserve = &nouveau_ttm_io_mem_reserve, |
959 | .io_mem_free = &nouveau_ttm_io_mem_free, | 1007 | .io_mem_free = &nouveau_ttm_io_mem_free, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 373950e34814..6f37995aee2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) | |||
39 | 39 | ||
40 | if (dev_priv->card_type >= NV_50) { | 40 | if (dev_priv->card_type >= NV_50) { |
41 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | 41 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
42 | dev_priv->vm_end, NV_DMA_ACCESS_RO, | 42 | (1ULL << 40), NV_MEM_ACCESS_RO, |
43 | NV_DMA_TARGET_AGP, &pushbuf); | 43 | NV_MEM_TARGET_VM, &pushbuf); |
44 | chan->pushbuf_base = pb->bo.offset; | 44 | chan->pushbuf_base = pb->bo.offset; |
45 | } else | 45 | } else |
46 | if (pb->bo.mem.mem_type == TTM_PL_TT) { | 46 | if (pb->bo.mem.mem_type == TTM_PL_TT) { |
47 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, | 47 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
48 | dev_priv->gart_info.aper_size, | 48 | dev_priv->gart_info.aper_size, |
49 | NV_DMA_ACCESS_RO, &pushbuf, | 49 | NV_MEM_ACCESS_RO, |
50 | NULL); | 50 | NV_MEM_TARGET_GART, &pushbuf); |
51 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; | 51 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; |
52 | } else | 52 | } else |
53 | if (dev_priv->card_type != NV_04) { | 53 | if (dev_priv->card_type != NV_04) { |
54 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | 54 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
55 | dev_priv->fb_available_size, | 55 | dev_priv->fb_available_size, |
56 | NV_DMA_ACCESS_RO, | 56 | NV_MEM_ACCESS_RO, |
57 | NV_DMA_TARGET_VIDMEM, &pushbuf); | 57 | NV_MEM_TARGET_VRAM, &pushbuf); |
58 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; | 58 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; |
59 | } else { | 59 | } else { |
60 | /* NV04 cmdbuf hack, from original ddx.. not sure of its | 60 | /* NV04 cmdbuf hack, from original ddx.. not sure of its |
@@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) | |||
62 | * VRAM. | 62 | * VRAM. |
63 | */ | 63 | */ |
64 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 64 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
65 | pci_resource_start(dev->pdev, | 65 | pci_resource_start(dev->pdev, 1), |
66 | 1), | ||
67 | dev_priv->fb_available_size, | 66 | dev_priv->fb_available_size, |
68 | NV_DMA_ACCESS_RO, | 67 | NV_MEM_ACCESS_RO, |
69 | NV_DMA_TARGET_PCI, &pushbuf); | 68 | NV_MEM_TARGET_PCI, &pushbuf); |
70 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; | 69 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; |
71 | } | 70 | } |
72 | 71 | ||
@@ -107,74 +106,60 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) | |||
107 | int | 106 | int |
108 | nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | 107 | nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, |
109 | struct drm_file *file_priv, | 108 | struct drm_file *file_priv, |
110 | uint32_t vram_handle, uint32_t tt_handle) | 109 | uint32_t vram_handle, uint32_t gart_handle) |
111 | { | 110 | { |
112 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 111 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
113 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 112 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
114 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 113 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; |
115 | struct nouveau_channel *chan; | 114 | struct nouveau_channel *chan; |
116 | int channel, user; | 115 | unsigned long flags; |
117 | int ret; | 116 | int ret; |
118 | 117 | ||
119 | /* | 118 | /* allocate and lock channel structure */ |
120 | * Alright, here is the full story | 119 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
121 | * Nvidia cards have multiple hw fifo contexts (praise them for that, | 120 | if (!chan) |
122 | * no complicated crash-prone context switches) | 121 | return -ENOMEM; |
123 | * We allocate a new context for each app and let it write to it | 122 | chan->dev = dev; |
124 | * directly (woo, full userspace command submission !) | 123 | chan->file_priv = file_priv; |
125 | * When there are no more contexts, you lost | 124 | chan->vram_handle = vram_handle; |
126 | */ | 125 | chan->gart_handle = gart_handle; |
127 | for (channel = 0; channel < pfifo->channels; channel++) { | 126 | |
128 | if (dev_priv->fifos[channel] == NULL) | 127 | kref_init(&chan->ref); |
128 | atomic_set(&chan->users, 1); | ||
129 | mutex_init(&chan->mutex); | ||
130 | mutex_lock(&chan->mutex); | ||
131 | |||
132 | /* allocate hw channel id */ | ||
133 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
134 | for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { | ||
135 | if (!dev_priv->channels.ptr[chan->id]) { | ||
136 | nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]); | ||
129 | break; | 137 | break; |
138 | } | ||
130 | } | 139 | } |
140 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
131 | 141 | ||
132 | /* no more fifos. you lost. */ | 142 | if (chan->id == pfifo->channels) { |
133 | if (channel == pfifo->channels) | 143 | mutex_unlock(&chan->mutex); |
134 | return -EINVAL; | 144 | kfree(chan); |
145 | return -ENODEV; | ||
146 | } | ||
135 | 147 | ||
136 | dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), | 148 | NV_DEBUG(dev, "initialising channel %d\n", chan->id); |
137 | GFP_KERNEL); | ||
138 | if (!dev_priv->fifos[channel]) | ||
139 | return -ENOMEM; | ||
140 | chan = dev_priv->fifos[channel]; | ||
141 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); | 149 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); |
150 | INIT_LIST_HEAD(&chan->nvsw.flip); | ||
142 | INIT_LIST_HEAD(&chan->fence.pending); | 151 | INIT_LIST_HEAD(&chan->fence.pending); |
143 | chan->dev = dev; | ||
144 | chan->id = channel; | ||
145 | chan->file_priv = file_priv; | ||
146 | chan->vram_handle = vram_handle; | ||
147 | chan->gart_handle = tt_handle; | ||
148 | |||
149 | NV_INFO(dev, "Allocating FIFO number %d\n", channel); | ||
150 | 152 | ||
151 | /* Allocate DMA push buffer */ | 153 | /* Allocate DMA push buffer */ |
152 | chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); | 154 | chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); |
153 | if (!chan->pushbuf_bo) { | 155 | if (!chan->pushbuf_bo) { |
154 | ret = -ENOMEM; | 156 | ret = -ENOMEM; |
155 | NV_ERROR(dev, "pushbuf %d\n", ret); | 157 | NV_ERROR(dev, "pushbuf %d\n", ret); |
156 | nouveau_channel_free(chan); | 158 | nouveau_channel_put(&chan); |
157 | return ret; | 159 | return ret; |
158 | } | 160 | } |
159 | 161 | ||
160 | nouveau_dma_pre_init(chan); | 162 | nouveau_dma_pre_init(chan); |
161 | |||
162 | /* Locate channel's user control regs */ | ||
163 | if (dev_priv->card_type < NV_40) | ||
164 | user = NV03_USER(channel); | ||
165 | else | ||
166 | if (dev_priv->card_type < NV_50) | ||
167 | user = NV40_USER(channel); | ||
168 | else | ||
169 | user = NV50_USER(channel); | ||
170 | |||
171 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, | ||
172 | PAGE_SIZE); | ||
173 | if (!chan->user) { | ||
174 | NV_ERROR(dev, "ioremap of regs failed.\n"); | ||
175 | nouveau_channel_free(chan); | ||
176 | return -ENOMEM; | ||
177 | } | ||
178 | chan->user_put = 0x40; | 163 | chan->user_put = 0x40; |
179 | chan->user_get = 0x44; | 164 | chan->user_get = 0x44; |
180 | 165 | ||
@@ -182,15 +167,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
182 | ret = nouveau_notifier_init_channel(chan); | 167 | ret = nouveau_notifier_init_channel(chan); |
183 | if (ret) { | 168 | if (ret) { |
184 | NV_ERROR(dev, "ntfy %d\n", ret); | 169 | NV_ERROR(dev, "ntfy %d\n", ret); |
185 | nouveau_channel_free(chan); | 170 | nouveau_channel_put(&chan); |
186 | return ret; | 171 | return ret; |
187 | } | 172 | } |
188 | 173 | ||
189 | /* Setup channel's default objects */ | 174 | /* Setup channel's default objects */ |
190 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); | 175 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); |
191 | if (ret) { | 176 | if (ret) { |
192 | NV_ERROR(dev, "gpuobj %d\n", ret); | 177 | NV_ERROR(dev, "gpuobj %d\n", ret); |
193 | nouveau_channel_free(chan); | 178 | nouveau_channel_put(&chan); |
194 | return ret; | 179 | return ret; |
195 | } | 180 | } |
196 | 181 | ||
@@ -198,7 +183,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
198 | ret = nouveau_channel_pushbuf_ctxdma_init(chan); | 183 | ret = nouveau_channel_pushbuf_ctxdma_init(chan); |
199 | if (ret) { | 184 | if (ret) { |
200 | NV_ERROR(dev, "pbctxdma %d\n", ret); | 185 | NV_ERROR(dev, "pbctxdma %d\n", ret); |
201 | nouveau_channel_free(chan); | 186 | nouveau_channel_put(&chan); |
202 | return ret; | 187 | return ret; |
203 | } | 188 | } |
204 | 189 | ||
@@ -206,16 +191,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
206 | pfifo->reassign(dev, false); | 191 | pfifo->reassign(dev, false); |
207 | 192 | ||
208 | /* Create a graphics context for new channel */ | 193 | /* Create a graphics context for new channel */ |
209 | ret = pgraph->create_context(chan); | 194 | if (dev_priv->card_type < NV_50) { |
210 | if (ret) { | 195 | ret = pgraph->create_context(chan); |
211 | nouveau_channel_free(chan); | 196 | if (ret) { |
212 | return ret; | 197 | nouveau_channel_put(&chan); |
198 | return ret; | ||
199 | } | ||
213 | } | 200 | } |
214 | 201 | ||
215 | /* Construct initial RAMFC for new channel */ | 202 | /* Construct initial RAMFC for new channel */ |
216 | ret = pfifo->create_context(chan); | 203 | ret = pfifo->create_context(chan); |
217 | if (ret) { | 204 | if (ret) { |
218 | nouveau_channel_free(chan); | 205 | nouveau_channel_put(&chan); |
219 | return ret; | 206 | return ret; |
220 | } | 207 | } |
221 | 208 | ||
@@ -225,83 +212,108 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
225 | if (!ret) | 212 | if (!ret) |
226 | ret = nouveau_fence_channel_init(chan); | 213 | ret = nouveau_fence_channel_init(chan); |
227 | if (ret) { | 214 | if (ret) { |
228 | nouveau_channel_free(chan); | 215 | nouveau_channel_put(&chan); |
229 | return ret; | 216 | return ret; |
230 | } | 217 | } |
231 | 218 | ||
232 | nouveau_debugfs_channel_init(chan); | 219 | nouveau_debugfs_channel_init(chan); |
233 | 220 | ||
234 | NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel); | 221 | NV_DEBUG(dev, "channel %d initialised\n", chan->id); |
235 | *chan_ret = chan; | 222 | *chan_ret = chan; |
236 | return 0; | 223 | return 0; |
237 | } | 224 | } |
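Channel ids are now claimed by scanning the channels.ptr table under channels.lock and publishing the new channel into the first empty slot, so concurrent lookups either see nothing or a fully referenced channel; a full table yields -ENODEV. The slot-claim loop in miniature (table size and types are illustrative only):

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_CHANNELS 128   /* illustrative; pfifo->channels in the driver */

    struct chan { int id; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct chan *table[MAX_CHANNELS];

    static int claim_id(struct chan *chan)
    {
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAX_CHANNELS; i++) {
            if (!table[i]) {
                table[i] = chan;   /* visible to lookups from here on */
                chan->id = i;
                break;
            }
        }
        pthread_mutex_unlock(&table_lock);

        return i < MAX_CHANNELS ? 0 : -1;  /* -1 plays the role of -ENODEV */
    }

    int main(void)
    {
        struct chan c;

        return claim_id(&c);
    }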
238 | 225 | ||
239 | /* stops a fifo */ | 226 | struct nouveau_channel * |
227 | nouveau_channel_get_unlocked(struct nouveau_channel *ref) | ||
228 | { | ||
229 | struct nouveau_channel *chan = NULL; | ||
230 | |||
231 | if (likely(ref && atomic_inc_not_zero(&ref->users))) | ||
232 | nouveau_channel_ref(ref, &chan); | ||
233 | |||
234 | return chan; | ||
235 | } | ||
236 | |||
237 | struct nouveau_channel * | ||
238 | nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) | ||
239 | { | ||
240 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
241 | struct nouveau_channel *chan; | ||
242 | unsigned long flags; | ||
243 | |||
244 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
245 | chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); | ||
246 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
247 | |||
248 | if (unlikely(!chan)) | ||
249 | return ERR_PTR(-EINVAL); | ||
250 | |||
251 | if (unlikely(file_priv && chan->file_priv != file_priv)) { | ||
252 | nouveau_channel_put_unlocked(&chan); | ||
253 | return ERR_PTR(-EINVAL); | ||
254 | } | ||
255 | |||
256 | mutex_lock(&chan->mutex); | ||
257 | return chan; | ||
258 | } | ||
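nouveau_channel_get_unlocked() builds on atomic_inc_not_zero(): the user count is bumped only while it is still nonzero, so a channel past its final put can never be handed out again. That primitive has no direct C11 spelling, but it is a small compare-and-swap loop:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 0) {
            /* on failure, 'old' is reloaded with the current value */
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return true;    /* we now hold a reference */
        }
        return false;           /* object already hit zero; don't revive it */
    }

    int main(void)
    {
        atomic_int live = 1, dead = 0;

        return (inc_not_zero(&live) && !inc_not_zero(&dead)) ? 0 : 1;
    }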
259 | |||
240 | void | 260 | void |
241 | nouveau_channel_free(struct nouveau_channel *chan) | 261 | nouveau_channel_put_unlocked(struct nouveau_channel **pchan) |
242 | { | 262 | { |
263 | struct nouveau_channel *chan = *pchan; | ||
243 | struct drm_device *dev = chan->dev; | 264 | struct drm_device *dev = chan->dev; |
244 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 265 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
245 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
246 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 266 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; |
267 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
268 | struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; | ||
247 | unsigned long flags; | 269 | unsigned long flags; |
248 | int ret; | ||
249 | 270 | ||
250 | NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); | 271 | /* decrement the refcount, and we're done if there's still refs */ |
272 | if (likely(!atomic_dec_and_test(&chan->users))) { | ||
273 | nouveau_channel_ref(NULL, pchan); | ||
274 | return; | ||
275 | } | ||
251 | 276 | ||
277 | /* no one wants the channel anymore */ ||
278 | NV_DEBUG(dev, "freeing channel %d\n", chan->id); | ||
252 | nouveau_debugfs_channel_fini(chan); | 279 | nouveau_debugfs_channel_fini(chan); |
253 | 280 | ||
254 | /* Give outstanding push buffers a chance to complete */ | 281 | /* give it chance to idle */ |
255 | nouveau_fence_update(chan); | 282 | nouveau_channel_idle(chan); |
256 | if (chan->fence.sequence != chan->fence.sequence_ack) { | ||
257 | struct nouveau_fence *fence = NULL; | ||
258 | |||
259 | ret = nouveau_fence_new(chan, &fence, true); | ||
260 | if (ret == 0) { | ||
261 | ret = nouveau_fence_wait(fence, NULL, false, false); | ||
262 | nouveau_fence_unref((void *)&fence); | ||
263 | } | ||
264 | |||
265 | if (ret) | ||
266 | NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); | ||
267 | } | ||
268 | 283 | ||
269 | /* Ensure all outstanding fences are signaled. They should be if the | 284 | /* ensure all outstanding fences are signaled. they should be if the |
270 | * above attempts at idling were OK, but if we failed this'll tell TTM | 285 | * above attempts at idling were OK, but if we failed this'll tell TTM |
271 | * we're done with the buffers. | 286 | * we're done with the buffers. |
272 | */ | 287 | */ |
273 | nouveau_fence_channel_fini(chan); | 288 | nouveau_fence_channel_fini(chan); |
274 | 289 | ||
275 | /* This will prevent pfifo from switching channels. */ | 290 | /* boot it off the hardware */ |
276 | pfifo->reassign(dev, false); | 291 | pfifo->reassign(dev, false); |
277 | 292 | ||
278 | /* We want to give pgraph a chance to idle and get rid of all potential | 293 | /* We want to give pgraph a chance to idle and get rid of all |
279 | * errors. We need to do this before the lock, otherwise the irq handler | 294 | * potential errors. We need to do this without the context |
280 | * is unable to process them. | 295 | * switch lock held, otherwise the irq handler is unable to |
296 | * process them. | ||
281 | */ | 297 | */ |
282 | if (pgraph->channel(dev) == chan) | 298 | if (pgraph->channel(dev) == chan) |
283 | nouveau_wait_for_idle(dev); | 299 | nouveau_wait_for_idle(dev); |
284 | 300 | ||
285 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 301 | /* destroy the engine specific contexts */ |
286 | |||
287 | pgraph->fifo_access(dev, false); | ||
288 | if (pgraph->channel(dev) == chan) | ||
289 | pgraph->unload_context(dev); | ||
290 | pgraph->destroy_context(chan); | ||
291 | pgraph->fifo_access(dev, true); | ||
292 | |||
293 | if (pfifo->channel_id(dev) == chan->id) { | ||
294 | pfifo->disable(dev); | ||
295 | pfifo->unload_context(dev); | ||
296 | pfifo->enable(dev); | ||
297 | } | ||
298 | pfifo->destroy_context(chan); | 302 | pfifo->destroy_context(chan); |
303 | pgraph->destroy_context(chan); | ||
304 | if (pcrypt->destroy_context) | ||
305 | pcrypt->destroy_context(chan); | ||
299 | 306 | ||
300 | pfifo->reassign(dev, true); | 307 | pfifo->reassign(dev, true); |
301 | 308 | ||
302 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 309 | /* aside from its resources, the channel should now be dead, |
310 | * remove it from the channel list | ||
311 | */ | ||
312 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
313 | nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); | ||
314 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
303 | 315 | ||
304 | /* Release the channel's resources */ | 316 | /* destroy any resources the channel owned */ |
305 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); | 317 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
306 | if (chan->pushbuf_bo) { | 318 | if (chan->pushbuf_bo) { |
307 | nouveau_bo_unmap(chan->pushbuf_bo); | 319 | nouveau_bo_unmap(chan->pushbuf_bo); |
@@ -310,44 +322,80 @@ nouveau_channel_free(struct nouveau_channel *chan) | |||
310 | } | 322 | } |
311 | nouveau_gpuobj_channel_takedown(chan); | 323 | nouveau_gpuobj_channel_takedown(chan); |
312 | nouveau_notifier_takedown_channel(chan); | 324 | nouveau_notifier_takedown_channel(chan); |
313 | if (chan->user) | ||
314 | iounmap(chan->user); | ||
315 | 325 | ||
316 | dev_priv->fifos[chan->id] = NULL; | 326 | nouveau_channel_ref(NULL, pchan); |
327 | } | ||
328 | |||
329 | void | ||
330 | nouveau_channel_put(struct nouveau_channel **pchan) | ||
331 | { | ||
332 | mutex_unlock(&(*pchan)->mutex); | ||
333 | nouveau_channel_put_unlocked(pchan); | ||
334 | } | ||
335 | |||
336 | static void | ||
337 | nouveau_channel_del(struct kref *ref) | ||
338 | { | ||
339 | struct nouveau_channel *chan = | ||
340 | container_of(ref, struct nouveau_channel, ref); | ||
341 | |||
317 | kfree(chan); | 342 | kfree(chan); |
318 | } | 343 | } |
319 | 344 | ||
345 | void | ||
346 | nouveau_channel_ref(struct nouveau_channel *chan, | ||
347 | struct nouveau_channel **pchan) | ||
348 | { | ||
349 | if (chan) | ||
350 | kref_get(&chan->ref); | ||
351 | |||
352 | if (*pchan) | ||
353 | kref_put(&(*pchan)->ref, nouveau_channel_del); | ||
354 | |||
355 | *pchan = chan; | ||
356 | } | ||
357 | |||
358 | void | ||
359 | nouveau_channel_idle(struct nouveau_channel *chan) | ||
360 | { | ||
361 | struct drm_device *dev = chan->dev; | ||
362 | struct nouveau_fence *fence = NULL; | ||
363 | int ret; | ||
364 | |||
365 | nouveau_fence_update(chan); | ||
366 | |||
367 | if (chan->fence.sequence != chan->fence.sequence_ack) { | ||
368 | ret = nouveau_fence_new(chan, &fence, true); | ||
369 | if (!ret) { | ||
370 | ret = nouveau_fence_wait(fence, false, false); | ||
371 | nouveau_fence_unref(&fence); | ||
372 | } | ||
373 | |||
374 | if (ret) | ||
375 | NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); | ||
376 | } | ||
377 | } | ||
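nouveau_channel_idle() drains a channel by emitting one more fence when sequence and sequence_ack disagree and waiting for it to retire. A toy single-file model of that emit-and-wait shape, where a condition variable plays the role of the fence interrupt and chan_retire() is the completion side a "hardware" thread would drive:

    #include <pthread.h>
    #include <stdint.h>

    struct toy_chan {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        uint32_t        sequence;       /* last marker emitted */
        uint32_t        sequence_ack;   /* last marker retired */
    };

    static void chan_retire(struct toy_chan *c, uint32_t marker)
    {
        pthread_mutex_lock(&c->lock);
        c->sequence_ack = marker;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void chan_idle(struct toy_chan *c)
    {
        pthread_mutex_lock(&c->lock);
        if (c->sequence != c->sequence_ack) {
            uint32_t marker = ++c->sequence;   /* "emit one more fence" */

            /* wrap-safe comparison, the usual fence-sequence idiom */
            while ((int32_t)(c->sequence_ack - marker) < 0)
                pthread_cond_wait(&c->cond, &c->lock);
        }
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct toy_chan c = { PTHREAD_MUTEX_INITIALIZER,
                              PTHREAD_COND_INITIALIZER, 0, 0 };

        chan_idle(&c);      /* nothing pending: returns immediately */
        chan_retire(&c, 0);
        return 0;
    }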
378 | |||
320 | /* cleans up all the fifos from file_priv */ | 379 | /* cleans up all the fifos from file_priv */ |
321 | void | 380 | void |
322 | nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) | 381 | nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) |
323 | { | 382 | { |
324 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 383 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
325 | struct nouveau_engine *engine = &dev_priv->engine; | 384 | struct nouveau_engine *engine = &dev_priv->engine; |
385 | struct nouveau_channel *chan; | ||
326 | int i; | 386 | int i; |
327 | 387 | ||
328 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); | 388 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); |
329 | for (i = 0; i < engine->fifo.channels; i++) { | 389 | for (i = 0; i < engine->fifo.channels; i++) { |
330 | struct nouveau_channel *chan = dev_priv->fifos[i]; | 390 | chan = nouveau_channel_get(dev, file_priv, i); |
391 | if (IS_ERR(chan)) | ||
392 | continue; | ||
331 | 393 | ||
332 | if (chan && chan->file_priv == file_priv) | 394 | atomic_dec(&chan->users); |
333 | nouveau_channel_free(chan); | 395 | nouveau_channel_put(&chan); |
334 | } | 396 | } |
335 | } | 397 | } |
336 | 398 | ||
337 | int | ||
338 | nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv, | ||
339 | int channel) | ||
340 | { | ||
341 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
342 | struct nouveau_engine *engine = &dev_priv->engine; | ||
343 | |||
344 | if (channel >= engine->fifo.channels) | ||
345 | return 0; | ||
346 | if (dev_priv->fifos[channel] == NULL) | ||
347 | return 0; | ||
348 | |||
349 | return (dev_priv->fifos[channel]->file_priv == file_priv); | ||
350 | } | ||
351 | 399 | ||
352 | /*********************************** | 400 | /*********************************** |
353 | * ioctls wrapping the functions | 401 | * ioctls wrapping the functions |
@@ -395,24 +443,26 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, | |||
395 | /* Named memory object area */ | 443 | /* Named memory object area */ |
396 | ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, | 444 | ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, |
397 | &init->notifier_handle); | 445 | &init->notifier_handle); |
398 | if (ret) { | ||
399 | nouveau_channel_free(chan); | ||
400 | return ret; | ||
401 | } | ||
402 | 446 | ||
403 | return 0; | 447 | if (ret == 0) |
448 | atomic_inc(&chan->users); /* userspace reference */ | ||
449 | nouveau_channel_put(&chan); | ||
450 | return ret; | ||
404 | } | 451 | } |
405 | 452 | ||
406 | static int | 453 | static int |
407 | nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | 454 | nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, |
408 | struct drm_file *file_priv) | 455 | struct drm_file *file_priv) |
409 | { | 456 | { |
410 | struct drm_nouveau_channel_free *cfree = data; | 457 | struct drm_nouveau_channel_free *req = data; |
411 | struct nouveau_channel *chan; | 458 | struct nouveau_channel *chan; |
412 | 459 | ||
413 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); | 460 | chan = nouveau_channel_get(dev, file_priv, req->channel); |
461 | if (IS_ERR(chan)) | ||
462 | return PTR_ERR(chan); | ||
414 | 463 | ||
415 | nouveau_channel_free(chan); | 464 | atomic_dec(&chan->users); |
465 | nouveau_channel_put(&chan); | ||
416 | return 0; | 466 | return 0; |
417 | } | 467 | } |
418 | 468 | ||
@@ -421,18 +471,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | |||
421 | ***********************************/ | 471 | ***********************************/ |
422 | 472 | ||
423 | struct drm_ioctl_desc nouveau_ioctls[] = { | 473 | struct drm_ioctl_desc nouveau_ioctls[] = { |
424 | DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), | 474 | DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), |
425 | DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 475 | DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
426 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), | 476 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH), |
427 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), | 477 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH), |
428 | DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), | 478 | DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), |
429 | DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), | 479 | DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH), |
430 | DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), | 480 | DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), |
431 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), | 481 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), |
432 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), | 482 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), |
433 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), | 483 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), |
434 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), | 484 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), |
435 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), | 485 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), |
436 | }; | 486 | }; |
437 | 487 | ||
438 | int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); | 488 | int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 52c356e9a3d1..a21e00076839 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -37,6 +37,8 @@ | |||
37 | #include "nouveau_connector.h" | 37 | #include "nouveau_connector.h" |
38 | #include "nouveau_hw.h" | 38 | #include "nouveau_hw.h" |
39 | 39 | ||
40 | static void nouveau_connector_hotplug(void *, int); | ||
41 | |||
40 | static struct nouveau_encoder * | 42 | static struct nouveau_encoder * |
41 | find_encoder_by_type(struct drm_connector *connector, int type) | 43 | find_encoder_by_type(struct drm_connector *connector, int type) |
42 | { | 44 | { |
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector) | |||
94 | } | 96 | } |
95 | 97 | ||
96 | static void | 98 | static void |
97 | nouveau_connector_destroy(struct drm_connector *drm_connector) | 99 | nouveau_connector_destroy(struct drm_connector *connector) |
98 | { | 100 | { |
99 | struct nouveau_connector *nv_connector = | 101 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
100 | nouveau_connector(drm_connector); | 102 | struct drm_nouveau_private *dev_priv; |
103 | struct nouveau_gpio_engine *pgpio; | ||
101 | struct drm_device *dev; | 104 | struct drm_device *dev; |
102 | 105 | ||
103 | if (!nv_connector) | 106 | if (!nv_connector) |
104 | return; | 107 | return; |
105 | 108 | ||
106 | dev = nv_connector->base.dev; | 109 | dev = nv_connector->base.dev; |
110 | dev_priv = dev->dev_private; | ||
107 | NV_DEBUG_KMS(dev, "\n"); | 111 | NV_DEBUG_KMS(dev, "\n"); |
108 | 112 | ||
113 | pgpio = &dev_priv->engine.gpio; | ||
114 | if (pgpio->irq_unregister) { | ||
115 | pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag, | ||
116 | nouveau_connector_hotplug, connector); | ||
117 | } | ||
118 | |||
109 | kfree(nv_connector->edid); | 119 | kfree(nv_connector->edid); |
110 | drm_sysfs_connector_remove(drm_connector); | 120 | drm_sysfs_connector_remove(connector); |
111 | drm_connector_cleanup(drm_connector); | 121 | drm_connector_cleanup(connector); |
112 | kfree(drm_connector); | 122 | kfree(connector); |
113 | } | 123 | } |
114 | 124 | ||
115 | static struct nouveau_i2c_chan * | 125 | static struct nouveau_i2c_chan * |
@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
760 | { | 770 | { |
761 | const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; | 771 | const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; |
762 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 772 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
773 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
763 | struct nouveau_connector *nv_connector = NULL; | 774 | struct nouveau_connector *nv_connector = NULL; |
764 | struct dcb_connector_table_entry *dcb = NULL; | 775 | struct dcb_connector_table_entry *dcb = NULL; |
765 | struct drm_connector *connector; | 776 | struct drm_connector *connector; |
@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
876 | break; | 887 | break; |
877 | } | 888 | } |
878 | 889 | ||
890 | if (pgpio->irq_register) { | ||
891 | pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, | ||
892 | nouveau_connector_hotplug, connector); | ||
893 | } | ||
894 | |||
879 | drm_sysfs_connector_add(connector); | 895 | drm_sysfs_connector_add(connector); |
880 | dcb->drm = connector; | 896 | dcb->drm = connector; |
881 | return dcb->drm; | 897 | return dcb->drm; |
@@ -886,3 +902,29 @@ fail: | |||
886 | return ERR_PTR(ret); | 902 | return ERR_PTR(ret); |
887 | 903 | ||
888 | } | 904 | } |
905 | |||
906 | static void | ||
907 | nouveau_connector_hotplug(void *data, int plugged) | ||
908 | { | ||
909 | struct drm_connector *connector = data; | ||
910 | struct drm_device *dev = connector->dev; | ||
911 | |||
912 | NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", | ||
913 | drm_get_connector_name(connector)); | ||
914 | |||
915 | if (connector->encoder && connector->encoder->crtc && | ||
916 | connector->encoder->crtc->enabled) { | ||
917 | struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder); | ||
918 | struct drm_encoder_helper_funcs *helper = | ||
919 | connector->encoder->helper_private; | ||
920 | |||
921 | if (nv_encoder->dcb->type == OUTPUT_DP) { | ||
922 | if (plugged) | ||
923 | helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); | ||
924 | else | ||
925 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); | ||
926 | } | ||
927 | } | ||
928 | |||
929 | drm_helper_hpd_irq_event(dev); | ||
930 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 2e11fd65b4dd..505c6bfb4d75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -29,6 +29,9 @@ | |||
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_fb.h" | 30 | #include "nouveau_fb.h" |
31 | #include "nouveau_fbcon.h" | 31 | #include "nouveau_fbcon.h" |
32 | #include "nouveau_hw.h" | ||
33 | #include "nouveau_crtc.h" | ||
34 | #include "nouveau_dma.h" | ||
32 | 35 | ||
33 | static void | 36 | static void |
34 | nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) | 37 | nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) |
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | |||
104 | .output_poll_changed = nouveau_fbcon_output_poll_changed, | 107 | .output_poll_changed = nouveau_fbcon_output_poll_changed, |
105 | }; | 108 | }; |
106 | 109 | ||
110 | int | ||
111 | nouveau_vblank_enable(struct drm_device *dev, int crtc) | ||
112 | { | ||
113 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
114 | |||
115 | if (dev_priv->card_type >= NV_50) | ||
116 | nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, | ||
117 | NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); | ||
118 | else | ||
119 | NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, | ||
120 | NV_PCRTC_INTR_0_VBLANK); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | void | ||
126 | nouveau_vblank_disable(struct drm_device *dev, int crtc) | ||
127 | { | ||
128 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
129 | |||
130 | if (dev_priv->card_type >= NV_50) | ||
131 | nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, | ||
132 | NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); | ||
133 | else | ||
134 | NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0); | ||
135 | } | ||
136 | |||
137 | static int | ||
138 | nouveau_page_flip_reserve(struct nouveau_bo *old_bo, | ||
139 | struct nouveau_bo *new_bo) | ||
140 | { | ||
141 | int ret; | ||
142 | |||
143 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | ||
144 | if (ret) | ||
145 | return ret; | ||
146 | |||
147 | ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); | ||
148 | if (ret) | ||
149 | goto fail; | ||
150 | |||
151 | ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); | ||
152 | if (ret) | ||
153 | goto fail_unreserve; | ||
154 | |||
155 | return 0; | ||
156 | |||
157 | fail_unreserve: | ||
158 | ttm_bo_unreserve(&new_bo->bo); | ||
159 | fail: | ||
160 | nouveau_bo_unpin(new_bo); | ||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | static void | ||
165 | nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, | ||
166 | struct nouveau_bo *new_bo, | ||
167 | struct nouveau_fence *fence) | ||
168 | { | ||
169 | nouveau_bo_fence(new_bo, fence); | ||
170 | ttm_bo_unreserve(&new_bo->bo); | ||
171 | |||
172 | nouveau_bo_fence(old_bo, fence); | ||
173 | ttm_bo_unreserve(&old_bo->bo); | ||
174 | |||
175 | nouveau_bo_unpin(old_bo); | ||
176 | } | ||
177 | |||
178 | static int | ||
179 | nouveau_page_flip_emit(struct nouveau_channel *chan, | ||
180 | struct nouveau_bo *old_bo, | ||
181 | struct nouveau_bo *new_bo, | ||
182 | struct nouveau_page_flip_state *s, | ||
183 | struct nouveau_fence **pfence) | ||
184 | { | ||
185 | struct drm_device *dev = chan->dev; | ||
186 | unsigned long flags; | ||
187 | int ret; | ||
188 | |||
189 | /* Queue it to the pending list */ | ||
190 | spin_lock_irqsave(&dev->event_lock, flags); | ||
191 | list_add_tail(&s->head, &chan->nvsw.flip); | ||
192 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
193 | |||
194 | /* Synchronize with the old framebuffer */ | ||
195 | ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan); | ||
196 | if (ret) | ||
197 | goto fail; | ||
198 | |||
199 | /* Emit the pageflip */ | ||
200 | ret = RING_SPACE(chan, 2); | ||
201 | if (ret) | ||
202 | goto fail; | ||
203 | |||
204 | BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); | ||
205 | OUT_RING(chan, 0); | ||
206 | FIRE_RING(chan); | ||
207 | |||
208 | ret = nouveau_fence_new(chan, pfence, true); | ||
209 | if (ret) | ||
210 | goto fail; | ||
211 | |||
212 | return 0; | ||
213 | fail: | ||
214 | spin_lock_irqsave(&dev->event_lock, flags); | ||
215 | list_del(&s->head); | ||
216 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | int | ||
221 | nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
222 | struct drm_pending_vblank_event *event) | ||
223 | { | ||
224 | struct drm_device *dev = crtc->dev; | ||
225 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
226 | struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; | ||
227 | struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; | ||
228 | struct nouveau_page_flip_state *s; | ||
229 | struct nouveau_channel *chan; | ||
230 | struct nouveau_fence *fence; | ||
231 | int ret; | ||
232 | |||
233 | if (dev_priv->engine.graph.accel_blocked) | ||
234 | return -ENODEV; | ||
235 | |||
236 | s = kzalloc(sizeof(*s), GFP_KERNEL); | ||
237 | if (!s) | ||
238 | return -ENOMEM; | ||
239 | |||
240 | /* Don't let the buffers go away while we flip */ | ||
241 | ret = nouveau_page_flip_reserve(old_bo, new_bo); | ||
242 | if (ret) | ||
243 | goto fail_free; | ||
244 | |||
245 | /* Initialize a page flip struct */ | ||
246 | *s = (struct nouveau_page_flip_state) | ||
247 | { { }, event, nouveau_crtc(crtc)->index, | ||
248 | fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y, | ||
249 | new_bo->bo.offset }; | ||
250 | |||
251 | /* Choose the channel the flip will be handled in */ | ||
252 | chan = nouveau_fence_channel(new_bo->bo.sync_obj); | ||
253 | if (!chan) | ||
254 | chan = nouveau_channel_get_unlocked(dev_priv->channel); | ||
255 | mutex_lock(&chan->mutex); | ||
256 | |||
257 | /* Emit a page flip */ | ||
258 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | ||
259 | nouveau_channel_put(&chan); | ||
260 | if (ret) | ||
261 | goto fail_unreserve; | ||
262 | |||
263 | /* Update the crtc struct and cleanup */ | ||
264 | crtc->fb = fb; | ||
265 | |||
266 | nouveau_page_flip_unreserve(old_bo, new_bo, fence); | ||
267 | nouveau_fence_unref(&fence); | ||
268 | return 0; | ||
269 | |||
270 | fail_unreserve: | ||
271 | nouveau_page_flip_unreserve(old_bo, new_bo, NULL); | ||
272 | fail_free: | ||
273 | kfree(s); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | int | ||
278 | nouveau_finish_page_flip(struct nouveau_channel *chan, | ||
279 | struct nouveau_page_flip_state *ps) | ||
280 | { | ||
281 | struct drm_device *dev = chan->dev; | ||
282 | struct nouveau_page_flip_state *s; | ||
283 | unsigned long flags; | ||
284 | |||
285 | spin_lock_irqsave(&dev->event_lock, flags); | ||
286 | |||
287 | if (list_empty(&chan->nvsw.flip)) { | ||
288 | NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); | ||
289 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
290 | return -EINVAL; | ||
291 | } | ||
292 | |||
293 | s = list_first_entry(&chan->nvsw.flip, | ||
294 | struct nouveau_page_flip_state, head); | ||
295 | if (s->event) { | ||
296 | struct drm_pending_vblank_event *e = s->event; | ||
297 | struct timeval now; | ||
298 | |||
299 | do_gettimeofday(&now); | ||
300 | e->event.sequence = 0; | ||
301 | e->event.tv_sec = now.tv_sec; | ||
302 | e->event.tv_usec = now.tv_usec; | ||
303 | list_add_tail(&e->base.link, &e->base.file_priv->event_list); | ||
304 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
305 | } | ||
306 | |||
307 | list_del(&s->head); | ||
308 | *ps = *s; | ||
309 | kfree(s); | ||
310 | |||
311 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
312 | return 0; | ||
313 | } | ||
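The flip machinery queues a nouveau_page_flip_state on the channel's flip list under dev->event_lock before the flip is emitted; when the software method completes, nouveau_finish_page_flip() pops the head, timestamps and delivers the pending vblank event, and hands the state back to the caller. A skeletal user-space version of that producer/consumer queue, with event delivery reduced to a printout:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/time.h>

    struct flip_state {
        struct flip_state *next;
        int crtc;
    };

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct flip_state *flip_head;
    static struct flip_state **flip_tail = &flip_head;

    static void flip_queue(struct flip_state *s)
    {
        s->next = NULL;
        pthread_mutex_lock(&event_lock);
        *flip_tail = s;                 /* append; completions pop in order */
        flip_tail = &s->next;
        pthread_mutex_unlock(&event_lock);
    }

    static int flip_finish(void)
    {
        struct flip_state *s;
        struct timeval now;

        pthread_mutex_lock(&event_lock);
        s = flip_head;
        if (!s) {
            pthread_mutex_unlock(&event_lock);
            return -1;                  /* unexpected flip: nothing queued */
        }
        flip_head = s->next;
        if (!flip_head)
            flip_tail = &flip_head;
        pthread_mutex_unlock(&event_lock);

        gettimeofday(&now, NULL);       /* stands in for the vblank event */
        printf("flip on crtc %d done at %ld.%06ld\n",
               s->crtc, (long)now.tv_sec, (long)now.tv_usec);
        free(s);
        return 0;
    }

    int main(void)
    {
        struct flip_state *s = calloc(1, sizeof(*s));

        flip_queue(s);
        return flip_finish();
    }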
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 82581e600dcd..6ff77cedc008 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -59,17 +59,11 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
59 | { | 59 | { |
60 | struct drm_device *dev = chan->dev; | 60 | struct drm_device *dev = chan->dev; |
61 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 61 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
62 | struct nouveau_gpuobj *obj = NULL; | ||
63 | int ret, i; | 62 | int ret, i; |
64 | 63 | ||
65 | /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ | 64 | /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ |
66 | ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? | 65 | ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ? |
67 | 0x0039 : 0x5039, &obj); | 66 | 0x0039 : 0x5039); |
68 | if (ret) | ||
69 | return ret; | ||
70 | |||
71 | ret = nouveau_ramht_insert(chan, NvM2MF, obj); | ||
72 | nouveau_gpuobj_ref(NULL, &obj); | ||
73 | if (ret) | 67 | if (ret) |
74 | return ret; | 68 | return ret; |
75 | 69 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 4562f309ae3d..38d599554bce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder) | |||
279 | struct bit_displayport_encoder_table *dpe; | 279 | struct bit_displayport_encoder_table *dpe; |
280 | int dpe_headerlen; | 280 | int dpe_headerlen; |
281 | uint8_t config[4], status[3]; | 281 | uint8_t config[4], status[3]; |
282 | bool cr_done, cr_max_vs, eq_done; | 282 | bool cr_done, cr_max_vs, eq_done, hpd_state; |
283 | int ret = 0, i, tries, voltage; | 283 | int ret = 0, i, tries, voltage; |
284 | 284 | ||
285 | NV_DEBUG_KMS(dev, "link training!!\n"); | 285 | NV_DEBUG_KMS(dev, "link training!!\n"); |
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder) | |||
297 | /* disable hotplug detect, this flips around on some panels during | 297 | /* disable hotplug detect, this flips around on some panels during |
298 | * link training. | 298 | * link training. |
299 | */ | 299 | */ |
300 | pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); | 300 | hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); |
301 | 301 | ||
302 | if (dpe->script0) { | 302 | if (dpe->script0) { |
303 | NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); | 303 | NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); |
@@ -439,7 +439,7 @@ stop: | |||
439 | } | 439 | } |
440 | 440 | ||
441 | /* re-enable hotplug detect */ | 441 | /* re-enable hotplug detect */ |
442 | pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); | 442 | pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state); |
443 | 443 | ||
444 | return eq_done; | 444 | return eq_done; |
445 | } | 445 | } |
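The link-training fix above replaces a hard-coded re-enable of hotplug detect with save-and-restore: irq_enable() now returns the previous state, and that is what gets put back after training, so a connector whose HPD was deliberately off stays off. The idiom in miniature:

    #include <stdbool.h>

    static bool hpd_enabled = true;

    /* setter that reports the previous state, like the new irq_enable() */
    static bool hpd_set(bool on)
    {
        bool old = hpd_enabled;

        hpd_enabled = on;
        return old;
    }

    static void link_train(void)
    {
        bool saved = hpd_set(false);   /* quiesce HPD during training */

        /* ... clock recovery and channel equalization ... */

        hpd_set(saved);                /* restore; never assume "on" */
    }

    int main(void)
    {
        link_train();
        return hpd_enabled ? 0 : 1;
    }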
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 90875494a65a..bb170570938b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); | |||
115 | int nouveau_perflvl_wr; | 115 | int nouveau_perflvl_wr; |
116 | module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); | 116 | module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); |
117 | 117 | ||
118 | MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); | ||
119 | int nouveau_msi; | ||
120 | module_param_named(msi, nouveau_msi, int, 0400); | ||
121 | |||
118 | int nouveau_fbpercrtc; | 122 | int nouveau_fbpercrtc; |
119 | #if 0 | 123 | #if 0 |
120 | module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); | 124 | module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); |
@@ -193,23 +197,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
193 | 197 | ||
194 | NV_INFO(dev, "Idling channels...\n"); | 198 | NV_INFO(dev, "Idling channels...\n"); |
195 | for (i = 0; i < pfifo->channels; i++) { | 199 | for (i = 0; i < pfifo->channels; i++) { |
196 | struct nouveau_fence *fence = NULL; | 200 | chan = dev_priv->channels.ptr[i]; |
197 | |||
198 | chan = dev_priv->fifos[i]; | ||
199 | if (!chan || (dev_priv->card_type >= NV_50 && | ||
200 | chan == dev_priv->fifos[0])) | ||
201 | continue; | ||
202 | |||
203 | ret = nouveau_fence_new(chan, &fence, true); | ||
204 | if (ret == 0) { | ||
205 | ret = nouveau_fence_wait(fence, NULL, false, false); | ||
206 | nouveau_fence_unref((void *)&fence); | ||
207 | } | ||
208 | 201 | ||
209 | if (ret) { | 202 | if (chan && chan->pushbuf_bo) |
210 | NV_ERROR(dev, "Failed to idle channel %d for suspend\n", | 203 | nouveau_channel_idle(chan); |
211 | chan->id); | ||
212 | } | ||
213 | } | 204 | } |
214 | 205 | ||
215 | pgraph->fifo_access(dev, false); | 206 | pgraph->fifo_access(dev, false); |
@@ -219,17 +210,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
219 | pfifo->unload_context(dev); | 210 | pfifo->unload_context(dev); |
220 | pgraph->unload_context(dev); | 211 | pgraph->unload_context(dev); |
221 | 212 | ||
222 | NV_INFO(dev, "Suspending GPU objects...\n"); | 213 | ret = pinstmem->suspend(dev); |
223 | ret = nouveau_gpuobj_suspend(dev); | ||
224 | if (ret) { | 214 | if (ret) { |
225 | NV_ERROR(dev, "... failed: %d\n", ret); | 215 | NV_ERROR(dev, "... failed: %d\n", ret); |
226 | goto out_abort; | 216 | goto out_abort; |
227 | } | 217 | } |
228 | 218 | ||
229 | ret = pinstmem->suspend(dev); | 219 | NV_INFO(dev, "Suspending GPU objects...\n"); |
220 | ret = nouveau_gpuobj_suspend(dev); | ||
230 | if (ret) { | 221 | if (ret) { |
231 | NV_ERROR(dev, "... failed: %d\n", ret); | 222 | NV_ERROR(dev, "... failed: %d\n", ret); |
232 | nouveau_gpuobj_suspend_cleanup(dev); | 223 | pinstmem->resume(dev); |
233 | goto out_abort; | 224 | goto out_abort; |
234 | } | 225 | } |
235 | 226 | ||
@@ -294,17 +285,18 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
294 | } | 285 | } |
295 | } | 286 | } |
296 | 287 | ||
288 | NV_INFO(dev, "Restoring GPU objects...\n"); | ||
289 | nouveau_gpuobj_resume(dev); | ||
290 | |||
297 | NV_INFO(dev, "Reinitialising engines...\n"); | 291 | NV_INFO(dev, "Reinitialising engines...\n"); |
298 | engine->instmem.resume(dev); | 292 | engine->instmem.resume(dev); |
299 | engine->mc.init(dev); | 293 | engine->mc.init(dev); |
300 | engine->timer.init(dev); | 294 | engine->timer.init(dev); |
301 | engine->fb.init(dev); | 295 | engine->fb.init(dev); |
302 | engine->graph.init(dev); | 296 | engine->graph.init(dev); |
297 | engine->crypt.init(dev); | ||
303 | engine->fifo.init(dev); | 298 | engine->fifo.init(dev); |
304 | 299 | ||
305 | NV_INFO(dev, "Restoring GPU objects...\n"); | ||
306 | nouveau_gpuobj_resume(dev); | ||
307 | |||
308 | nouveau_irq_postinstall(dev); | 300 | nouveau_irq_postinstall(dev); |
309 | 301 | ||
310 | /* Re-write SKIPS, they'll have been lost over the suspend */ | 302 | /* Re-write SKIPS, they'll have been lost over the suspend */ |
@@ -313,7 +305,7 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
313 | int j; | 305 | int j; |
314 | 306 | ||
315 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 307 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
316 | chan = dev_priv->fifos[i]; | 308 | chan = dev_priv->channels.ptr[i]; |
317 | if (!chan || !chan->pushbuf_bo) | 309 | if (!chan || !chan->pushbuf_bo) |
318 | continue; | 310 | continue; |
319 | 311 | ||
@@ -347,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
347 | 339 | ||
348 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 340 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
349 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 341 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
342 | u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT; | ||
350 | 343 | ||
351 | nv_crtc->cursor.set_offset(nv_crtc, | 344 | nv_crtc->cursor.set_offset(nv_crtc, offset); |
352 | nv_crtc->cursor.nvbo->bo.offset - | ||
353 | dev_priv->vm_vram_base); | ||
354 | |||
355 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, | 345 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, |
356 | nv_crtc->cursor_saved_y); | 346 | nv_crtc->cursor_saved_y); |
357 | } | 347 | } |
358 | 348 | ||
359 | /* Force CLUT to get re-loaded during modeset */ | 349 | /* Force CLUT to get re-loaded during modeset */ |
@@ -393,6 +383,9 @@ static struct drm_driver driver = { | |||
393 | .irq_postinstall = nouveau_irq_postinstall, | 383 | .irq_postinstall = nouveau_irq_postinstall, |
394 | .irq_uninstall = nouveau_irq_uninstall, | 384 | .irq_uninstall = nouveau_irq_uninstall, |
395 | .irq_handler = nouveau_irq_handler, | 385 | .irq_handler = nouveau_irq_handler, |
386 | .get_vblank_counter = drm_vblank_count, | ||
387 | .enable_vblank = nouveau_vblank_enable, | ||
388 | .disable_vblank = nouveau_vblank_disable, | ||
396 | .reclaim_buffers = drm_core_reclaim_buffers, | 389 | .reclaim_buffers = drm_core_reclaim_buffers, |
397 | .ioctls = nouveau_ioctls, | 390 | .ioctls = nouveau_ioctls, |
398 | .fops = { | 391 | .fops = { |
@@ -403,6 +396,7 @@ static struct drm_driver driver = { | |||
403 | .mmap = nouveau_ttm_mmap, | 396 | .mmap = nouveau_ttm_mmap, |
404 | .poll = drm_poll, | 397 | .poll = drm_poll, |
405 | .fasync = drm_fasync, | 398 | .fasync = drm_fasync, |
399 | .read = drm_read, | ||
406 | #if defined(CONFIG_COMPAT) | 400 | #if defined(CONFIG_COMPAT) |
407 | .compat_ioctl = nouveau_compat_ioctl, | 401 | .compat_ioctl = nouveau_compat_ioctl, |
408 | #endif | 402 | #endif |
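Note on the nouveau_drv.c hunks: the new msi parameter defaults to off (it would be switched on with something like "modprobe nouveau msi=1"); suspend-time idling is delegated to the new nouveau_channel_idle() helper; and instance memory is now suspended before GPU objects so the error path can unwind the step that already succeeded. The unwind shape, as a sketch of the code above:

    ret = pinstmem->suspend(dev);
    if (ret)
            goto out_abort;

    ret = nouveau_gpuobj_suspend(dev);
    if (ret) {
            pinstmem->resume(dev);  /* undo the earlier, successful step */
            goto out_abort;
    }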
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1c7db64c03bf..8f13906185b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -54,22 +54,36 @@ struct nouveau_fpriv { | |||
54 | #include "nouveau_drm.h" | 54 | #include "nouveau_drm.h" |
55 | #include "nouveau_reg.h" | 55 | #include "nouveau_reg.h" |
56 | #include "nouveau_bios.h" | 56 | #include "nouveau_bios.h" |
57 | #include "nouveau_util.h" | ||
58 | |||
57 | struct nouveau_grctx; | 59 | struct nouveau_grctx; |
60 | struct nouveau_vram; | ||
61 | #include "nouveau_vm.h" | ||
58 | 62 | ||
59 | #define MAX_NUM_DCB_ENTRIES 16 | 63 | #define MAX_NUM_DCB_ENTRIES 16 |
60 | 64 | ||
61 | #define NOUVEAU_MAX_CHANNEL_NR 128 | 65 | #define NOUVEAU_MAX_CHANNEL_NR 128 |
62 | #define NOUVEAU_MAX_TILE_NR 15 | 66 | #define NOUVEAU_MAX_TILE_NR 15 |
63 | 67 | ||
64 | #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) | 68 | struct nouveau_vram { |
65 | #define NV50_VM_BLOCK (512*1024*1024ULL) | 69 | struct drm_device *dev; |
66 | #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) | 70 | |
71 | struct nouveau_vma bar_vma; | ||
72 | |||
73 | struct list_head regions; | ||
74 | u32 memtype; | ||
75 | u64 offset; | ||
76 | u64 size; | ||
77 | }; | ||
67 | 78 | ||
68 | struct nouveau_tile_reg { | 79 | struct nouveau_tile_reg { |
69 | struct nouveau_fence *fence; | ||
70 | uint32_t addr; | ||
71 | uint32_t size; | ||
72 | bool used; | 80 | bool used; |
81 | uint32_t addr; | ||
82 | uint32_t limit; | ||
83 | uint32_t pitch; | ||
84 | uint32_t zcomp; | ||
85 | struct drm_mm_node *tag_mem; | ||
86 | struct nouveau_fence *fence; | ||
73 | }; | 87 | }; |
74 | 88 | ||
75 | struct nouveau_bo { | 89 | struct nouveau_bo { |
@@ -88,6 +102,7 @@ struct nouveau_bo { | |||
88 | 102 | ||
89 | struct nouveau_channel *channel; | 103 | struct nouveau_channel *channel; |
90 | 104 | ||
105 | struct nouveau_vma vma; | ||
91 | bool mappable; | 106 | bool mappable; |
92 | bool no_vm; | 107 | bool no_vm; |
93 | 108 | ||
@@ -96,7 +111,6 @@ struct nouveau_bo { | |||
96 | struct nouveau_tile_reg *tile; | 111 | struct nouveau_tile_reg *tile; |
97 | 112 | ||
98 | struct drm_gem_object *gem; | 113 | struct drm_gem_object *gem; |
99 | struct drm_file *cpu_filp; | ||
100 | int pin_refcnt; | 114 | int pin_refcnt; |
101 | }; | 115 | }; |
102 | 116 | ||
@@ -133,20 +147,28 @@ enum nouveau_flags { | |||
133 | 147 | ||
134 | #define NVOBJ_ENGINE_SW 0 | 148 | #define NVOBJ_ENGINE_SW 0 |
135 | #define NVOBJ_ENGINE_GR 1 | 149 | #define NVOBJ_ENGINE_GR 1 |
136 | #define NVOBJ_ENGINE_DISPLAY 2 | 150 | #define NVOBJ_ENGINE_PPP 2 |
151 | #define NVOBJ_ENGINE_COPY 3 | ||
152 | #define NVOBJ_ENGINE_VP 4 | ||
153 | #define NVOBJ_ENGINE_CRYPT 5 | ||
154 | #define NVOBJ_ENGINE_BSP 6 | ||
155 | #define NVOBJ_ENGINE_DISPLAY 0xcafe0001 | ||
137 | #define NVOBJ_ENGINE_INT 0xdeadbeef | 156 | #define NVOBJ_ENGINE_INT 0xdeadbeef |
138 | 157 | ||
158 | #define NVOBJ_FLAG_DONT_MAP (1 << 0) | ||
139 | #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) | 159 | #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) |
140 | #define NVOBJ_FLAG_ZERO_FREE (1 << 2) | 160 | #define NVOBJ_FLAG_ZERO_FREE (1 << 2) |
161 | #define NVOBJ_FLAG_VM (1 << 3) | ||
162 | |||
163 | #define NVOBJ_CINST_GLOBAL 0xdeadbeef | ||
164 | |||
141 | struct nouveau_gpuobj { | 165 | struct nouveau_gpuobj { |
142 | struct drm_device *dev; | 166 | struct drm_device *dev; |
143 | struct kref refcount; | 167 | struct kref refcount; |
144 | struct list_head list; | 168 | struct list_head list; |
145 | 169 | ||
146 | struct drm_mm_node *im_pramin; | 170 | void *node; |
147 | struct nouveau_bo *im_backing; | 171 | u32 *suspend; |
148 | uint32_t *im_backing_suspend; | ||
149 | int im_bound; | ||
150 | 172 | ||
151 | uint32_t flags; | 173 | uint32_t flags; |
152 | 174 | ||
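Note: the engine defines grow real entries (PPP, COPY, VP, CRYPT, BSP) with small index-like values, while DISPLAY and INT keep deliberately out-of-range sentinel values (0xcafe0001, 0xdeadbeef). A hedged sketch of what that split suggests; handle_pseudo() and the engines[] table are hypothetical, not from this patch:

    switch (eng) {
    case NVOBJ_ENGINE_DISPLAY:
    case NVOBJ_ENGINE_INT:
            return handle_pseudo(chan, eng);        /* hypothetical */
    default:
            return engines[eng]->new_object(chan);  /* hypothetical */
    }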
@@ -162,10 +184,29 @@ struct nouveau_gpuobj { | |||
162 | void *priv; | 184 | void *priv; |
163 | }; | 185 | }; |
164 | 186 | ||
187 | struct nouveau_page_flip_state { | ||
188 | struct list_head head; | ||
189 | struct drm_pending_vblank_event *event; | ||
190 | int crtc, bpp, pitch, x, y; | ||
191 | uint64_t offset; | ||
192 | }; | ||
193 | |||
194 | enum nouveau_channel_mutex_class { | ||
195 | NOUVEAU_UCHANNEL_MUTEX, | ||
196 | NOUVEAU_KCHANNEL_MUTEX | ||
197 | }; | ||
198 | |||
165 | struct nouveau_channel { | 199 | struct nouveau_channel { |
166 | struct drm_device *dev; | 200 | struct drm_device *dev; |
167 | int id; | 201 | int id; |
168 | 202 | ||
203 | /* references to the channel data structure */ | ||
204 | struct kref ref; | ||
205 | /* users of the hardware channel resources, the hardware | ||
206 | * context will be kicked off when it reaches zero. */ | ||
207 | atomic_t users; | ||
208 | struct mutex mutex; | ||
209 | |||
169 | /* owner of this fifo */ | 210 | /* owner of this fifo */ |
170 | struct drm_file *file_priv; | 211 | struct drm_file *file_priv; |
171 | /* mapping of the fifo itself */ | 212 | /* mapping of the fifo itself */ |
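Note: a channel now carries two counts plus a mutex: a kref for the data structure itself and an atomic 'users' count for the hardware context, which is kicked when users reaches zero (per the comment above). A runnable toy model of that split lifetime, not driver code:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct channel {
            atomic_int refs;   /* holders of the structure */
            atomic_int users;  /* consumers of the hardware context */
    };

    static void channel_put(struct channel *chan)
    {
            if (atomic_fetch_sub(&chan->users, 1) == 1)
                    printf("last user: kick hardware context\n");
            if (atomic_fetch_sub(&chan->refs, 1) == 1) {
                    printf("last ref: free structure\n");
                    free(chan);
            }
    }

    int main(void)
    {
            struct channel *chan = calloc(1, sizeof(*chan));

            atomic_init(&chan->refs, 1);
            atomic_init(&chan->users, 1);
            channel_put(chan);
            return 0;
    }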
@@ -202,12 +243,12 @@ struct nouveau_channel { | |||
202 | /* PGRAPH context */ | 243 | /* PGRAPH context */ |
203 | /* XXX may be merge 2 pointers as private data ??? */ | 244 | /* XXX may be merge 2 pointers as private data ??? */ |
204 | struct nouveau_gpuobj *ramin_grctx; | 245 | struct nouveau_gpuobj *ramin_grctx; |
246 | struct nouveau_gpuobj *crypt_ctx; | ||
205 | void *pgraph_ctx; | 247 | void *pgraph_ctx; |
206 | 248 | ||
207 | /* NV50 VM */ | 249 | /* NV50 VM */ |
250 | struct nouveau_vm *vm; | ||
208 | struct nouveau_gpuobj *vm_pd; | 251 | struct nouveau_gpuobj *vm_pd; |
209 | struct nouveau_gpuobj *vm_gart_pt; | ||
210 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; | ||
211 | 252 | ||
212 | /* Objects */ | 253 | /* Objects */ |
213 | struct nouveau_gpuobj *ramin; /* Private instmem */ | 254 | struct nouveau_gpuobj *ramin; /* Private instmem */ |
@@ -238,9 +279,11 @@ struct nouveau_channel { | |||
238 | 279 | ||
239 | struct { | 280 | struct { |
240 | struct nouveau_gpuobj *vblsem; | 281 | struct nouveau_gpuobj *vblsem; |
282 | uint32_t vblsem_head; | ||
241 | uint32_t vblsem_offset; | 283 | uint32_t vblsem_offset; |
242 | uint32_t vblsem_rval; | 284 | uint32_t vblsem_rval; |
243 | struct list_head vbl_wait; | 285 | struct list_head vbl_wait; |
286 | struct list_head flip; | ||
244 | } nvsw; | 287 | } nvsw; |
245 | 288 | ||
246 | struct { | 289 | struct { |
@@ -258,11 +301,11 @@ struct nouveau_instmem_engine { | |||
258 | int (*suspend)(struct drm_device *dev); | 301 | int (*suspend)(struct drm_device *dev); |
259 | void (*resume)(struct drm_device *dev); | 302 | void (*resume)(struct drm_device *dev); |
260 | 303 | ||
261 | int (*populate)(struct drm_device *, struct nouveau_gpuobj *, | 304 | int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); |
262 | uint32_t *size); | 305 | void (*put)(struct nouveau_gpuobj *); |
263 | void (*clear)(struct drm_device *, struct nouveau_gpuobj *); | 306 | int (*map)(struct nouveau_gpuobj *); |
264 | int (*bind)(struct drm_device *, struct nouveau_gpuobj *); | 307 | void (*unmap)(struct nouveau_gpuobj *); |
265 | int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); | 308 | |
266 | void (*flush)(struct drm_device *); | 309 | void (*flush)(struct drm_device *); |
267 | }; | 310 | }; |
268 | 311 | ||
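Note: the instmem vtable moves from populate/clear/bind/unbind to allocator-style hooks. The pairing below is inferred from the names alone, so treat it as a sketch rather than the verified call order: get/put manage the backing storage, map/unmap the PRAMIN mapping.

    ret = pinstmem->get(gpuobj, size, align);
    if (ret)
            return ret;

    ret = pinstmem->map(gpuobj);
    if (ret) {
            pinstmem->put(gpuobj);
            return ret;
    }

    /* ... access the object through its mapping ... */

    pinstmem->unmap(gpuobj);
    pinstmem->put(gpuobj);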
@@ -279,12 +322,17 @@ struct nouveau_timer_engine { | |||
279 | 322 | ||
280 | struct nouveau_fb_engine { | 323 | struct nouveau_fb_engine { |
281 | int num_tiles; | 324 | int num_tiles; |
325 | struct drm_mm tag_heap; | ||
326 | void *priv; | ||
282 | 327 | ||
283 | int (*init)(struct drm_device *dev); | 328 | int (*init)(struct drm_device *dev); |
284 | void (*takedown)(struct drm_device *dev); | 329 | void (*takedown)(struct drm_device *dev); |
285 | 330 | ||
286 | void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, | 331 | void (*init_tile_region)(struct drm_device *dev, int i, |
287 | uint32_t size, uint32_t pitch); | 332 | uint32_t addr, uint32_t size, |
333 | uint32_t pitch, uint32_t flags); | ||
334 | void (*set_tile_region)(struct drm_device *dev, int i); | ||
335 | void (*free_tile_region)(struct drm_device *dev, int i); | ||
288 | }; | 336 | }; |
289 | 337 | ||
290 | struct nouveau_fifo_engine { | 338 | struct nouveau_fifo_engine { |
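Note: the single set_region_tiling() hook is split into a describe/program/release triple, and PGRAPH (further down in this header) gains a matching per-region set_tile_region(). The ordering below is an assumption from the names:

    pfb->init_tile_region(dev, i, addr, size, pitch, flags); /* describe */
    pfb->set_tile_region(dev, i);                            /* program hw */
    /* ... */
    pfb->free_tile_region(dev, i);                           /* release */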
@@ -310,21 +358,9 @@ struct nouveau_fifo_engine { | |||
310 | void (*tlb_flush)(struct drm_device *dev); | 358 | void (*tlb_flush)(struct drm_device *dev); |
311 | }; | 359 | }; |
312 | 360 | ||
313 | struct nouveau_pgraph_object_method { | ||
314 | int id; | ||
315 | int (*exec)(struct nouveau_channel *chan, int grclass, int mthd, | ||
316 | uint32_t data); | ||
317 | }; | ||
318 | |||
319 | struct nouveau_pgraph_object_class { | ||
320 | int id; | ||
321 | bool software; | ||
322 | struct nouveau_pgraph_object_method *methods; | ||
323 | }; | ||
324 | |||
325 | struct nouveau_pgraph_engine { | 361 | struct nouveau_pgraph_engine { |
326 | struct nouveau_pgraph_object_class *grclass; | ||
327 | bool accel_blocked; | 362 | bool accel_blocked; |
363 | bool registered; | ||
328 | int grctx_size; | 364 | int grctx_size; |
329 | 365 | ||
330 | /* NV2x/NV3x context table (0x400780) */ | 366 | /* NV2x/NV3x context table (0x400780) */ |
@@ -342,8 +378,7 @@ struct nouveau_pgraph_engine { | |||
342 | int (*unload_context)(struct drm_device *); | 378 | int (*unload_context)(struct drm_device *); |
343 | void (*tlb_flush)(struct drm_device *dev); | 379 | void (*tlb_flush)(struct drm_device *dev); |
344 | 380 | ||
345 | void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, | 381 | void (*set_tile_region)(struct drm_device *dev, int i); |
346 | uint32_t size, uint32_t pitch); | ||
347 | }; | 382 | }; |
348 | 383 | ||
349 | struct nouveau_display_engine { | 384 | struct nouveau_display_engine { |
@@ -355,13 +390,19 @@ struct nouveau_display_engine { | |||
355 | }; | 390 | }; |
356 | 391 | ||
357 | struct nouveau_gpio_engine { | 392 | struct nouveau_gpio_engine { |
393 | void *priv; | ||
394 | |||
358 | int (*init)(struct drm_device *); | 395 | int (*init)(struct drm_device *); |
359 | void (*takedown)(struct drm_device *); | 396 | void (*takedown)(struct drm_device *); |
360 | 397 | ||
361 | int (*get)(struct drm_device *, enum dcb_gpio_tag); | 398 | int (*get)(struct drm_device *, enum dcb_gpio_tag); |
362 | int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); | 399 | int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); |
363 | 400 | ||
364 | void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); | 401 | int (*irq_register)(struct drm_device *, enum dcb_gpio_tag, |
402 | void (*)(void *, int), void *); | ||
403 | void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag, | ||
404 | void (*)(void *, int), void *); | ||
405 | bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); | ||
365 | }; | 406 | }; |
366 | 407 | ||
367 | struct nouveau_pm_voltage_level { | 408 | struct nouveau_pm_voltage_level { |
@@ -437,6 +478,7 @@ struct nouveau_pm_engine { | |||
437 | struct nouveau_pm_level *cur; | 478 | struct nouveau_pm_level *cur; |
438 | 479 | ||
439 | struct device *hwmon; | 480 | struct device *hwmon; |
481 | struct notifier_block acpi_nb; | ||
440 | 482 | ||
441 | int (*clock_get)(struct drm_device *, u32 id); | 483 | int (*clock_get)(struct drm_device *, u32 id); |
442 | void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, | 484 | void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, |
@@ -449,6 +491,25 @@ struct nouveau_pm_engine { | |||
449 | int (*temp_get)(struct drm_device *); | 491 | int (*temp_get)(struct drm_device *); |
450 | }; | 492 | }; |
451 | 493 | ||
494 | struct nouveau_crypt_engine { | ||
495 | bool registered; | ||
496 | |||
497 | int (*init)(struct drm_device *); | ||
498 | void (*takedown)(struct drm_device *); | ||
499 | int (*create_context)(struct nouveau_channel *); | ||
500 | void (*destroy_context)(struct nouveau_channel *); | ||
501 | void (*tlb_flush)(struct drm_device *dev); | ||
502 | }; | ||
503 | |||
504 | struct nouveau_vram_engine { | ||
505 | int (*init)(struct drm_device *); | ||
506 | int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, | ||
507 | u32 type, struct nouveau_vram **); | ||
508 | void (*put)(struct drm_device *, struct nouveau_vram **); | ||
509 | |||
510 | bool (*flags_valid)(struct drm_device *, u32 tile_flags); | ||
511 | }; | ||
512 | |||
452 | struct nouveau_engine { | 513 | struct nouveau_engine { |
453 | struct nouveau_instmem_engine instmem; | 514 | struct nouveau_instmem_engine instmem; |
454 | struct nouveau_mc_engine mc; | 515 | struct nouveau_mc_engine mc; |
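Note: two new per-chipset engines land here: crypt (initialised in the resume path above via engine.crypt.init) and vram. A sketch of allocating through the vram hooks; that put() clears the caller's pointer is an assumption read off the double-pointer signature:

    struct nouveau_vram *vram = NULL;

    ret = dev_priv->engine.vram.get(dev, size, align, size_nc, type, &vram);
    if (ret)
            return ret;
    /* ... */
    dev_priv->engine.vram.put(dev, &vram);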
@@ -459,6 +520,8 @@ struct nouveau_engine { | |||
459 | struct nouveau_display_engine display; | 520 | struct nouveau_display_engine display; |
460 | struct nouveau_gpio_engine gpio; | 521 | struct nouveau_gpio_engine gpio; |
461 | struct nouveau_pm_engine pm; | 522 | struct nouveau_pm_engine pm; |
523 | struct nouveau_crypt_engine crypt; | ||
524 | struct nouveau_vram_engine vram; | ||
462 | }; | 525 | }; |
463 | 526 | ||
464 | struct nouveau_pll_vals { | 527 | struct nouveau_pll_vals { |
@@ -577,18 +640,15 @@ struct drm_nouveau_private { | |||
577 | bool ramin_available; | 640 | bool ramin_available; |
578 | struct drm_mm ramin_heap; | 641 | struct drm_mm ramin_heap; |
579 | struct list_head gpuobj_list; | 642 | struct list_head gpuobj_list; |
643 | struct list_head classes; | ||
580 | 644 | ||
581 | struct nouveau_bo *vga_ram; | 645 | struct nouveau_bo *vga_ram; |
582 | 646 | ||
647 | /* interrupt handling */ | ||
648 | void (*irq_handler[32])(struct drm_device *); | ||
649 | bool msi_enabled; | ||
583 | struct workqueue_struct *wq; | 650 | struct workqueue_struct *wq; |
584 | struct work_struct irq_work; | 651 | struct work_struct irq_work; |
585 | struct work_struct hpd_work; | ||
586 | |||
587 | struct { | ||
588 | spinlock_t lock; | ||
589 | uint32_t hpd0_bits; | ||
590 | uint32_t hpd1_bits; | ||
591 | } hpd_state; | ||
592 | 652 | ||
593 | struct list_head vbl_waiting; | 653 | struct list_head vbl_waiting; |
594 | 654 | ||
@@ -605,8 +665,10 @@ struct drm_nouveau_private { | |||
605 | struct nouveau_bo *bo; | 665 | struct nouveau_bo *bo; |
606 | } fence; | 666 | } fence; |
607 | 667 | ||
608 | int fifo_alloc_count; | 668 | struct { |
609 | struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; | 669 | spinlock_t lock; |
670 | struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR]; | ||
671 | } channels; | ||
610 | 672 | ||
611 | struct nouveau_engine engine; | 673 | struct nouveau_engine engine; |
612 | struct nouveau_channel *channel; | 674 | struct nouveau_channel *channel; |
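Note: the flat fifos[] array becomes a spinlock-guarded table, which is what allows nouveau_channel_get() (declared further down) to take a reference safely against concurrent teardown. A sketch of the lookup this enables; the exact locking discipline is inferred from the declarations, not shown in this patch:

    unsigned long flags;

    spin_lock_irqsave(&dev_priv->channels.lock, flags);
    chan = dev_priv->channels.ptr[id];
    if (chan)
            chan = nouveau_channel_get_unlocked(chan);
    spin_unlock_irqrestore(&dev_priv->channels.lock, flags);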
@@ -632,12 +694,14 @@ struct drm_nouveau_private { | |||
632 | uint64_t aper_free; | 694 | uint64_t aper_free; |
633 | 695 | ||
634 | struct nouveau_gpuobj *sg_ctxdma; | 696 | struct nouveau_gpuobj *sg_ctxdma; |
635 | struct page *sg_dummy_page; | 697 | struct nouveau_vma vma; |
636 | dma_addr_t sg_dummy_bus; | ||
637 | } gart_info; | 698 | } gart_info; |
638 | 699 | ||
639 | /* nv10-nv40 tiling regions */ | 700 | /* nv10-nv40 tiling regions */ |
640 | struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; | 701 | struct { |
702 | struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; | ||
703 | spinlock_t lock; | ||
704 | } tile; | ||
641 | 705 | ||
642 | /* VRAM/fb configuration */ | 706 | /* VRAM/fb configuration */ |
643 | uint64_t vram_size; | 707 | uint64_t vram_size; |
@@ -650,14 +714,12 @@ struct drm_nouveau_private { | |||
650 | uint64_t fb_aper_free; | 714 | uint64_t fb_aper_free; |
651 | int fb_mtrr; | 715 | int fb_mtrr; |
652 | 716 | ||
717 | /* BAR control (NV50-) */ | ||
718 | struct nouveau_vm *bar1_vm; | ||
719 | struct nouveau_vm *bar3_vm; | ||
720 | |||
653 | /* G8x/G9x virtual address space */ | 721 | /* G8x/G9x virtual address space */ |
654 | uint64_t vm_gart_base; | 722 | struct nouveau_vm *chan_vm; |
655 | uint64_t vm_gart_size; | ||
656 | uint64_t vm_vram_base; | ||
657 | uint64_t vm_vram_size; | ||
658 | uint64_t vm_end; | ||
659 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; | ||
660 | int vm_vram_pt_nr; | ||
661 | 723 | ||
662 | struct nvbios vbios; | 724 | struct nvbios vbios; |
663 | 725 | ||
@@ -674,6 +736,7 @@ struct drm_nouveau_private { | |||
674 | struct backlight_device *backlight; | 736 | struct backlight_device *backlight; |
675 | 737 | ||
676 | struct nouveau_channel *evo; | 738 | struct nouveau_channel *evo; |
739 | u32 evo_alloc; | ||
677 | struct { | 740 | struct { |
678 | struct dcb_entry *dcb; | 741 | struct dcb_entry *dcb; |
679 | u16 script; | 742 | u16 script; |
@@ -719,16 +782,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) | |||
719 | return 0; | 782 | return 0; |
720 | } | 783 | } |
721 | 784 | ||
722 | #define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ | ||
723 | struct drm_nouveau_private *nv = dev->dev_private; \ | ||
724 | if (!nouveau_channel_owner(dev, (cl), (id))) { \ | ||
725 | NV_ERROR(dev, "pid %d doesn't own channel %d\n", \ | ||
726 | DRM_CURRENTPID, (id)); \ | ||
727 | return -EPERM; \ | ||
728 | } \ | ||
729 | (ch) = nv->fifos[(id)]; \ | ||
730 | } while (0) | ||
731 | |||
732 | /* nouveau_drv.c */ | 785 | /* nouveau_drv.c */ |
733 | extern int nouveau_agpmode; | 786 | extern int nouveau_agpmode; |
734 | extern int nouveau_duallink; | 787 | extern int nouveau_duallink; |
@@ -748,6 +801,7 @@ extern int nouveau_force_post; | |||
748 | extern int nouveau_override_conntype; | 801 | extern int nouveau_override_conntype; |
749 | extern char *nouveau_perflvl; | 802 | extern char *nouveau_perflvl; |
750 | extern int nouveau_perflvl_wr; | 803 | extern int nouveau_perflvl_wr; |
804 | extern int nouveau_msi; | ||
751 | 805 | ||
752 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); | 806 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); |
753 | extern int nouveau_pci_resume(struct pci_dev *pdev); | 807 | extern int nouveau_pci_resume(struct pci_dev *pdev); |
@@ -762,8 +816,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data, | |||
762 | struct drm_file *); | 816 | struct drm_file *); |
763 | extern int nouveau_ioctl_setparam(struct drm_device *, void *data, | 817 | extern int nouveau_ioctl_setparam(struct drm_device *, void *data, |
764 | struct drm_file *); | 818 | struct drm_file *); |
765 | extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, | 819 | extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, |
766 | uint32_t reg, uint32_t mask, uint32_t val); | 820 | uint32_t reg, uint32_t mask, uint32_t val); |
821 | extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, | ||
822 | uint32_t reg, uint32_t mask, uint32_t val); | ||
767 | extern bool nouveau_wait_for_idle(struct drm_device *); | 823 | extern bool nouveau_wait_for_idle(struct drm_device *); |
768 | extern int nouveau_card_init(struct drm_device *); | 824 | extern int nouveau_card_init(struct drm_device *); |
769 | 825 | ||
@@ -775,18 +831,15 @@ extern void nouveau_mem_gart_fini(struct drm_device *); | |||
775 | extern int nouveau_mem_init_agp(struct drm_device *); | 831 | extern int nouveau_mem_init_agp(struct drm_device *); |
776 | extern int nouveau_mem_reset_agp(struct drm_device *); | 832 | extern int nouveau_mem_reset_agp(struct drm_device *); |
777 | extern void nouveau_mem_close(struct drm_device *); | 833 | extern void nouveau_mem_close(struct drm_device *); |
778 | extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, | 834 | extern int nouveau_mem_detect(struct drm_device *); |
779 | uint32_t addr, | 835 | extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags); |
780 | uint32_t size, | 836 | extern struct nouveau_tile_reg *nv10_mem_set_tiling( |
781 | uint32_t pitch); | 837 | struct drm_device *dev, uint32_t addr, uint32_t size, |
782 | extern void nv10_mem_expire_tiling(struct drm_device *dev, | 838 | uint32_t pitch, uint32_t flags); |
783 | struct nouveau_tile_reg *tile, | 839 | extern void nv10_mem_put_tile_region(struct drm_device *dev, |
784 | struct nouveau_fence *fence); | 840 | struct nouveau_tile_reg *tile, |
785 | extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, | 841 | struct nouveau_fence *fence); |
786 | uint32_t size, uint32_t flags, | 842 | extern const struct ttm_mem_type_manager_func nouveau_vram_manager; |
787 | uint64_t phys); | ||
788 | extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt, | ||
789 | uint32_t size); | ||
790 | 843 | ||
791 | /* nouveau_notifier.c */ | 844 | /* nouveau_notifier.c */ |
792 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); | 845 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); |
@@ -803,21 +856,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, | |||
803 | extern struct drm_ioctl_desc nouveau_ioctls[]; | 856 | extern struct drm_ioctl_desc nouveau_ioctls[]; |
804 | extern int nouveau_max_ioctl; | 857 | extern int nouveau_max_ioctl; |
805 | extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); | 858 | extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); |
806 | extern int nouveau_channel_owner(struct drm_device *, struct drm_file *, | ||
807 | int channel); | ||
808 | extern int nouveau_channel_alloc(struct drm_device *dev, | 859 | extern int nouveau_channel_alloc(struct drm_device *dev, |
809 | struct nouveau_channel **chan, | 860 | struct nouveau_channel **chan, |
810 | struct drm_file *file_priv, | 861 | struct drm_file *file_priv, |
811 | uint32_t fb_ctxdma, uint32_t tt_ctxdma); | 862 | uint32_t fb_ctxdma, uint32_t tt_ctxdma); |
812 | extern void nouveau_channel_free(struct nouveau_channel *); | 863 | extern struct nouveau_channel * |
864 | nouveau_channel_get_unlocked(struct nouveau_channel *); | ||
865 | extern struct nouveau_channel * | ||
866 | nouveau_channel_get(struct drm_device *, struct drm_file *, int id); | ||
867 | extern void nouveau_channel_put_unlocked(struct nouveau_channel **); | ||
868 | extern void nouveau_channel_put(struct nouveau_channel **); | ||
869 | extern void nouveau_channel_ref(struct nouveau_channel *chan, | ||
870 | struct nouveau_channel **pchan); | ||
871 | extern void nouveau_channel_idle(struct nouveau_channel *chan); | ||
813 | 872 | ||
814 | /* nouveau_object.c */ | 873 | /* nouveau_object.c */ |
874 | #define NVOBJ_CLASS(d,c,e) do { \ | ||
875 | int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \ | ||
876 | if (ret) \ | ||
877 | return ret; \ | ||
878 | } while(0) | ||
879 | |||
880 | #define NVOBJ_MTHD(d,c,m,e) do { \ | ||
881 | int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \ | ||
882 | if (ret) \ | ||
883 | return ret; \ | ||
884 | } while(0) | ||
885 | |||
815 | extern int nouveau_gpuobj_early_init(struct drm_device *); | 886 | extern int nouveau_gpuobj_early_init(struct drm_device *); |
816 | extern int nouveau_gpuobj_init(struct drm_device *); | 887 | extern int nouveau_gpuobj_init(struct drm_device *); |
817 | extern void nouveau_gpuobj_takedown(struct drm_device *); | 888 | extern void nouveau_gpuobj_takedown(struct drm_device *); |
818 | extern int nouveau_gpuobj_suspend(struct drm_device *dev); | 889 | extern int nouveau_gpuobj_suspend(struct drm_device *dev); |
819 | extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); | ||
820 | extern void nouveau_gpuobj_resume(struct drm_device *dev); | 890 | extern void nouveau_gpuobj_resume(struct drm_device *dev); |
891 | extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); | ||
892 | extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, | ||
893 | int (*exec)(struct nouveau_channel *, | ||
894 | u32 class, u32 mthd, u32 data)); | ||
895 | extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); | ||
896 | extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); | ||
821 | extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, | 897 | extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, |
822 | uint32_t vram_h, uint32_t tt_h); | 898 | uint32_t vram_h, uint32_t tt_h); |
823 | extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); | 899 | extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); |
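Note: NVOBJ_CLASS() and NVOBJ_MTHD() hide a "return ret" inside, so they are only usable in functions returning int. The removed owner-check macro is replaced by a reference-taking lookup; a sketch of the expected ioctl-path shape (the ERR_PTR return convention is an assumption, this header does not spell it out):

    chan = nouveau_channel_get(dev, file_priv, id);
    if (IS_ERR(chan))
            return PTR_ERR(chan);

    /* ... the channel cannot be torn down while we hold it ... */

    nouveau_channel_put(&chan);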
@@ -832,21 +908,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, | |||
832 | extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, | 908 | extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, |
833 | uint64_t offset, uint64_t size, int access, | 909 | uint64_t offset, uint64_t size, int access, |
834 | int target, struct nouveau_gpuobj **); | 910 | int target, struct nouveau_gpuobj **); |
835 | extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, | 911 | extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class); |
836 | uint64_t offset, uint64_t size, | 912 | extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, |
837 | int access, struct nouveau_gpuobj **, | 913 | u64 size, int target, int access, u32 type, |
838 | uint32_t *o_ret); | 914 | u32 comp, struct nouveau_gpuobj **pobj); |
839 | extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, | 915 | extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset, |
840 | struct nouveau_gpuobj **); | 916 | int class, u64 base, u64 size, int target, |
841 | extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, | 917 | int access, u32 type, u32 comp); |
842 | struct nouveau_gpuobj **); | ||
843 | extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, | 918 | extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, |
844 | struct drm_file *); | 919 | struct drm_file *); |
845 | extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, | 920 | extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, |
846 | struct drm_file *); | 921 | struct drm_file *); |
847 | 922 | ||
848 | /* nouveau_irq.c */ | 923 | /* nouveau_irq.c */ |
924 | extern int nouveau_irq_init(struct drm_device *); | ||
925 | extern void nouveau_irq_fini(struct drm_device *); | ||
849 | extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); | 926 | extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); |
927 | extern void nouveau_irq_register(struct drm_device *, int status_bit, | ||
928 | void (*)(struct drm_device *)); | ||
929 | extern void nouveau_irq_unregister(struct drm_device *, int status_bit); | ||
850 | extern void nouveau_irq_preinstall(struct drm_device *); | 930 | extern void nouveau_irq_preinstall(struct drm_device *); |
851 | extern int nouveau_irq_postinstall(struct drm_device *); | 931 | extern int nouveau_irq_postinstall(struct drm_device *); |
852 | extern void nouveau_irq_uninstall(struct drm_device *); | 932 | extern void nouveau_irq_uninstall(struct drm_device *); |
@@ -854,8 +934,8 @@ extern void nouveau_irq_uninstall(struct drm_device *); | |||
854 | /* nouveau_sgdma.c */ | 934 | /* nouveau_sgdma.c */ |
855 | extern int nouveau_sgdma_init(struct drm_device *); | 935 | extern int nouveau_sgdma_init(struct drm_device *); |
856 | extern void nouveau_sgdma_takedown(struct drm_device *); | 936 | extern void nouveau_sgdma_takedown(struct drm_device *); |
857 | extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, | 937 | extern uint32_t nouveau_sgdma_get_physical(struct drm_device *, |
858 | uint32_t *page); | 938 | uint32_t offset); |
859 | extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); | 939 | extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); |
860 | 940 | ||
861 | /* nouveau_debugfs.c */ | 941 | /* nouveau_debugfs.c */ |
@@ -966,18 +1046,25 @@ extern void nv04_fb_takedown(struct drm_device *); | |||
966 | /* nv10_fb.c */ | 1046 | /* nv10_fb.c */ |
967 | extern int nv10_fb_init(struct drm_device *); | 1047 | extern int nv10_fb_init(struct drm_device *); |
968 | extern void nv10_fb_takedown(struct drm_device *); | 1048 | extern void nv10_fb_takedown(struct drm_device *); |
969 | extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, | 1049 | extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, |
970 | uint32_t, uint32_t); | 1050 | uint32_t addr, uint32_t size, |
1051 | uint32_t pitch, uint32_t flags); | ||
1052 | extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); | ||
1053 | extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); | ||
971 | 1054 | ||
972 | /* nv30_fb.c */ | 1055 | /* nv30_fb.c */ |
973 | extern int nv30_fb_init(struct drm_device *); | 1056 | extern int nv30_fb_init(struct drm_device *); |
974 | extern void nv30_fb_takedown(struct drm_device *); | 1057 | extern void nv30_fb_takedown(struct drm_device *); |
1058 | extern void nv30_fb_init_tile_region(struct drm_device *dev, int i, | ||
1059 | uint32_t addr, uint32_t size, | ||
1060 | uint32_t pitch, uint32_t flags); | ||
1061 | extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); | ||
975 | 1062 | ||
976 | /* nv40_fb.c */ | 1063 | /* nv40_fb.c */ |
977 | extern int nv40_fb_init(struct drm_device *); | 1064 | extern int nv40_fb_init(struct drm_device *); |
978 | extern void nv40_fb_takedown(struct drm_device *); | 1065 | extern void nv40_fb_takedown(struct drm_device *); |
979 | extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, | 1066 | extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); |
980 | uint32_t, uint32_t); | 1067 | |
981 | /* nv50_fb.c */ | 1068 | /* nv50_fb.c */ |
982 | extern int nv50_fb_init(struct drm_device *); | 1069 | extern int nv50_fb_init(struct drm_device *); |
983 | extern void nv50_fb_takedown(struct drm_device *); | 1070 | extern void nv50_fb_takedown(struct drm_device *); |
@@ -989,6 +1076,7 @@ extern void nvc0_fb_takedown(struct drm_device *); | |||
989 | 1076 | ||
990 | /* nv04_fifo.c */ | 1077 | /* nv04_fifo.c */ |
991 | extern int nv04_fifo_init(struct drm_device *); | 1078 | extern int nv04_fifo_init(struct drm_device *); |
1079 | extern void nv04_fifo_fini(struct drm_device *); | ||
992 | extern void nv04_fifo_disable(struct drm_device *); | 1080 | extern void nv04_fifo_disable(struct drm_device *); |
993 | extern void nv04_fifo_enable(struct drm_device *); | 1081 | extern void nv04_fifo_enable(struct drm_device *); |
994 | extern bool nv04_fifo_reassign(struct drm_device *, bool); | 1082 | extern bool nv04_fifo_reassign(struct drm_device *, bool); |
@@ -998,19 +1086,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *); | |||
998 | extern void nv04_fifo_destroy_context(struct nouveau_channel *); | 1086 | extern void nv04_fifo_destroy_context(struct nouveau_channel *); |
999 | extern int nv04_fifo_load_context(struct nouveau_channel *); | 1087 | extern int nv04_fifo_load_context(struct nouveau_channel *); |
1000 | extern int nv04_fifo_unload_context(struct drm_device *); | 1088 | extern int nv04_fifo_unload_context(struct drm_device *); |
1089 | extern void nv04_fifo_isr(struct drm_device *); | ||
1001 | 1090 | ||
1002 | /* nv10_fifo.c */ | 1091 | /* nv10_fifo.c */ |
1003 | extern int nv10_fifo_init(struct drm_device *); | 1092 | extern int nv10_fifo_init(struct drm_device *); |
1004 | extern int nv10_fifo_channel_id(struct drm_device *); | 1093 | extern int nv10_fifo_channel_id(struct drm_device *); |
1005 | extern int nv10_fifo_create_context(struct nouveau_channel *); | 1094 | extern int nv10_fifo_create_context(struct nouveau_channel *); |
1006 | extern void nv10_fifo_destroy_context(struct nouveau_channel *); | ||
1007 | extern int nv10_fifo_load_context(struct nouveau_channel *); | 1095 | extern int nv10_fifo_load_context(struct nouveau_channel *); |
1008 | extern int nv10_fifo_unload_context(struct drm_device *); | 1096 | extern int nv10_fifo_unload_context(struct drm_device *); |
1009 | 1097 | ||
1010 | /* nv40_fifo.c */ | 1098 | /* nv40_fifo.c */ |
1011 | extern int nv40_fifo_init(struct drm_device *); | 1099 | extern int nv40_fifo_init(struct drm_device *); |
1012 | extern int nv40_fifo_create_context(struct nouveau_channel *); | 1100 | extern int nv40_fifo_create_context(struct nouveau_channel *); |
1013 | extern void nv40_fifo_destroy_context(struct nouveau_channel *); | ||
1014 | extern int nv40_fifo_load_context(struct nouveau_channel *); | 1101 | extern int nv40_fifo_load_context(struct nouveau_channel *); |
1015 | extern int nv40_fifo_unload_context(struct drm_device *); | 1102 | extern int nv40_fifo_unload_context(struct drm_device *); |
1016 | 1103 | ||
@@ -1038,7 +1125,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *); | |||
1038 | extern int nvc0_fifo_unload_context(struct drm_device *); | 1125 | extern int nvc0_fifo_unload_context(struct drm_device *); |
1039 | 1126 | ||
1040 | /* nv04_graph.c */ | 1127 | /* nv04_graph.c */ |
1041 | extern struct nouveau_pgraph_object_class nv04_graph_grclass[]; | ||
1042 | extern int nv04_graph_init(struct drm_device *); | 1128 | extern int nv04_graph_init(struct drm_device *); |
1043 | extern void nv04_graph_takedown(struct drm_device *); | 1129 | extern void nv04_graph_takedown(struct drm_device *); |
1044 | extern void nv04_graph_fifo_access(struct drm_device *, bool); | 1130 | extern void nv04_graph_fifo_access(struct drm_device *, bool); |
@@ -1047,10 +1133,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *); | |||
1047 | extern void nv04_graph_destroy_context(struct nouveau_channel *); | 1133 | extern void nv04_graph_destroy_context(struct nouveau_channel *); |
1048 | extern int nv04_graph_load_context(struct nouveau_channel *); | 1134 | extern int nv04_graph_load_context(struct nouveau_channel *); |
1049 | extern int nv04_graph_unload_context(struct drm_device *); | 1135 | extern int nv04_graph_unload_context(struct drm_device *); |
1050 | extern void nv04_graph_context_switch(struct drm_device *); | 1136 | extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, |
1137 | u32 class, u32 mthd, u32 data); | ||
1138 | extern struct nouveau_bitfield nv04_graph_nsource[]; | ||
1051 | 1139 | ||
1052 | /* nv10_graph.c */ | 1140 | /* nv10_graph.c */ |
1053 | extern struct nouveau_pgraph_object_class nv10_graph_grclass[]; | ||
1054 | extern int nv10_graph_init(struct drm_device *); | 1141 | extern int nv10_graph_init(struct drm_device *); |
1055 | extern void nv10_graph_takedown(struct drm_device *); | 1142 | extern void nv10_graph_takedown(struct drm_device *); |
1056 | extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); | 1143 | extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); |
@@ -1058,13 +1145,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *); | |||
1058 | extern void nv10_graph_destroy_context(struct nouveau_channel *); | 1145 | extern void nv10_graph_destroy_context(struct nouveau_channel *); |
1059 | extern int nv10_graph_load_context(struct nouveau_channel *); | 1146 | extern int nv10_graph_load_context(struct nouveau_channel *); |
1060 | extern int nv10_graph_unload_context(struct drm_device *); | 1147 | extern int nv10_graph_unload_context(struct drm_device *); |
1061 | extern void nv10_graph_context_switch(struct drm_device *); | 1148 | extern void nv10_graph_set_tile_region(struct drm_device *dev, int i); |
1062 | extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, | 1149 | extern struct nouveau_bitfield nv10_graph_intr[]; |
1063 | uint32_t, uint32_t); | 1150 | extern struct nouveau_bitfield nv10_graph_nstatus[]; |
1064 | 1151 | ||
1065 | /* nv20_graph.c */ | 1152 | /* nv20_graph.c */ |
1066 | extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; | ||
1067 | extern struct nouveau_pgraph_object_class nv30_graph_grclass[]; | ||
1068 | extern int nv20_graph_create_context(struct nouveau_channel *); | 1153 | extern int nv20_graph_create_context(struct nouveau_channel *); |
1069 | extern void nv20_graph_destroy_context(struct nouveau_channel *); | 1154 | extern void nv20_graph_destroy_context(struct nouveau_channel *); |
1070 | extern int nv20_graph_load_context(struct nouveau_channel *); | 1155 | extern int nv20_graph_load_context(struct nouveau_channel *); |
@@ -1072,11 +1157,9 @@ extern int nv20_graph_unload_context(struct drm_device *); | |||
1072 | extern int nv20_graph_init(struct drm_device *); | 1157 | extern int nv20_graph_init(struct drm_device *); |
1073 | extern void nv20_graph_takedown(struct drm_device *); | 1158 | extern void nv20_graph_takedown(struct drm_device *); |
1074 | extern int nv30_graph_init(struct drm_device *); | 1159 | extern int nv30_graph_init(struct drm_device *); |
1075 | extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, | 1160 | extern void nv20_graph_set_tile_region(struct drm_device *dev, int i); |
1076 | uint32_t, uint32_t); | ||
1077 | 1161 | ||
1078 | /* nv40_graph.c */ | 1162 | /* nv40_graph.c */ |
1079 | extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; | ||
1080 | extern int nv40_graph_init(struct drm_device *); | 1163 | extern int nv40_graph_init(struct drm_device *); |
1081 | extern void nv40_graph_takedown(struct drm_device *); | 1164 | extern void nv40_graph_takedown(struct drm_device *); |
1082 | extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); | 1165 | extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); |
@@ -1085,11 +1168,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *); | |||
1085 | extern int nv40_graph_load_context(struct nouveau_channel *); | 1168 | extern int nv40_graph_load_context(struct nouveau_channel *); |
1086 | extern int nv40_graph_unload_context(struct drm_device *); | 1169 | extern int nv40_graph_unload_context(struct drm_device *); |
1087 | extern void nv40_grctx_init(struct nouveau_grctx *); | 1170 | extern void nv40_grctx_init(struct nouveau_grctx *); |
1088 | extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, | 1171 | extern void nv40_graph_set_tile_region(struct drm_device *dev, int i); |
1089 | uint32_t, uint32_t); | ||
1090 | 1172 | ||
1091 | /* nv50_graph.c */ | 1173 | /* nv50_graph.c */ |
1092 | extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; | ||
1093 | extern int nv50_graph_init(struct drm_device *); | 1174 | extern int nv50_graph_init(struct drm_device *); |
1094 | extern void nv50_graph_takedown(struct drm_device *); | 1175 | extern void nv50_graph_takedown(struct drm_device *); |
1095 | extern void nv50_graph_fifo_access(struct drm_device *, bool); | 1176 | extern void nv50_graph_fifo_access(struct drm_device *, bool); |
@@ -1098,7 +1179,6 @@ extern int nv50_graph_create_context(struct nouveau_channel *); | |||
1098 | extern void nv50_graph_destroy_context(struct nouveau_channel *); | 1179 | extern void nv50_graph_destroy_context(struct nouveau_channel *); |
1099 | extern int nv50_graph_load_context(struct nouveau_channel *); | 1180 | extern int nv50_graph_load_context(struct nouveau_channel *); |
1100 | extern int nv50_graph_unload_context(struct drm_device *); | 1181 | extern int nv50_graph_unload_context(struct drm_device *); |
1101 | extern void nv50_graph_context_switch(struct drm_device *); | ||
1102 | extern int nv50_grctx_init(struct nouveau_grctx *); | 1182 | extern int nv50_grctx_init(struct nouveau_grctx *); |
1103 | extern void nv50_graph_tlb_flush(struct drm_device *dev); | 1183 | extern void nv50_graph_tlb_flush(struct drm_device *dev); |
1104 | extern void nv86_graph_tlb_flush(struct drm_device *dev); | 1184 | extern void nv86_graph_tlb_flush(struct drm_device *dev); |
@@ -1113,16 +1193,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *); | |||
1113 | extern int nvc0_graph_load_context(struct nouveau_channel *); | 1193 | extern int nvc0_graph_load_context(struct nouveau_channel *); |
1114 | extern int nvc0_graph_unload_context(struct drm_device *); | 1194 | extern int nvc0_graph_unload_context(struct drm_device *); |
1115 | 1195 | ||
1196 | /* nv84_crypt.c */ | ||
1197 | extern int nv84_crypt_init(struct drm_device *dev); | ||
1198 | extern void nv84_crypt_fini(struct drm_device *dev); | ||
1199 | extern int nv84_crypt_create_context(struct nouveau_channel *); | ||
1200 | extern void nv84_crypt_destroy_context(struct nouveau_channel *); | ||
1201 | extern void nv84_crypt_tlb_flush(struct drm_device *dev); | ||
1202 | |||
1116 | /* nv04_instmem.c */ | 1203 | /* nv04_instmem.c */ |
1117 | extern int nv04_instmem_init(struct drm_device *); | 1204 | extern int nv04_instmem_init(struct drm_device *); |
1118 | extern void nv04_instmem_takedown(struct drm_device *); | 1205 | extern void nv04_instmem_takedown(struct drm_device *); |
1119 | extern int nv04_instmem_suspend(struct drm_device *); | 1206 | extern int nv04_instmem_suspend(struct drm_device *); |
1120 | extern void nv04_instmem_resume(struct drm_device *); | 1207 | extern void nv04_instmem_resume(struct drm_device *); |
1121 | extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, | 1208 | extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); |
1122 | uint32_t *size); | 1209 | extern void nv04_instmem_put(struct nouveau_gpuobj *); |
1123 | extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); | 1210 | extern int nv04_instmem_map(struct nouveau_gpuobj *); |
1124 | extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); | 1211 | extern void nv04_instmem_unmap(struct nouveau_gpuobj *); |
1125 | extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); | ||
1126 | extern void nv04_instmem_flush(struct drm_device *); | 1212 | extern void nv04_instmem_flush(struct drm_device *); |
1127 | 1213 | ||
1128 | /* nv50_instmem.c */ | 1214 | /* nv50_instmem.c */ |
@@ -1130,25 +1216,22 @@ extern int nv50_instmem_init(struct drm_device *); | |||
1130 | extern void nv50_instmem_takedown(struct drm_device *); | 1216 | extern void nv50_instmem_takedown(struct drm_device *); |
1131 | extern int nv50_instmem_suspend(struct drm_device *); | 1217 | extern int nv50_instmem_suspend(struct drm_device *); |
1132 | extern void nv50_instmem_resume(struct drm_device *); | 1218 | extern void nv50_instmem_resume(struct drm_device *); |
1133 | extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, | 1219 | extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); |
1134 | uint32_t *size); | 1220 | extern void nv50_instmem_put(struct nouveau_gpuobj *); |
1135 | extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); | 1221 | extern int nv50_instmem_map(struct nouveau_gpuobj *); |
1136 | extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); | 1222 | extern void nv50_instmem_unmap(struct nouveau_gpuobj *); |
1137 | extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); | ||
1138 | extern void nv50_instmem_flush(struct drm_device *); | 1223 | extern void nv50_instmem_flush(struct drm_device *); |
1139 | extern void nv84_instmem_flush(struct drm_device *); | 1224 | extern void nv84_instmem_flush(struct drm_device *); |
1140 | extern void nv50_vm_flush(struct drm_device *, int engine); | ||
1141 | 1225 | ||
1142 | /* nvc0_instmem.c */ | 1226 | /* nvc0_instmem.c */ |
1143 | extern int nvc0_instmem_init(struct drm_device *); | 1227 | extern int nvc0_instmem_init(struct drm_device *); |
1144 | extern void nvc0_instmem_takedown(struct drm_device *); | 1228 | extern void nvc0_instmem_takedown(struct drm_device *); |
1145 | extern int nvc0_instmem_suspend(struct drm_device *); | 1229 | extern int nvc0_instmem_suspend(struct drm_device *); |
1146 | extern void nvc0_instmem_resume(struct drm_device *); | 1230 | extern void nvc0_instmem_resume(struct drm_device *); |
1147 | extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, | 1231 | extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); |
1148 | uint32_t *size); | 1232 | extern void nvc0_instmem_put(struct nouveau_gpuobj *); |
1149 | extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); | 1233 | extern int nvc0_instmem_map(struct nouveau_gpuobj *); |
1150 | extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); | 1234 | extern void nvc0_instmem_unmap(struct nouveau_gpuobj *); |
1151 | extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); | ||
1152 | extern void nvc0_instmem_flush(struct drm_device *); | 1235 | extern void nvc0_instmem_flush(struct drm_device *); |
1153 | 1236 | ||
1154 | /* nv04_mc.c */ | 1237 | /* nv04_mc.c */ |
@@ -1219,6 +1302,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); | |||
1219 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); | 1302 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); |
1220 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); | 1303 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); |
1221 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); | 1304 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); |
1305 | extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); | ||
1306 | extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, | ||
1307 | bool no_wait_reserve, bool no_wait_gpu); | ||
1222 | 1308 | ||
1223 | /* nouveau_fence.c */ | 1309 | /* nouveau_fence.c */ |
1224 | struct nouveau_fence; | 1310 | struct nouveau_fence; |
@@ -1234,12 +1320,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence, | |||
1234 | void (*work)(void *priv, bool signalled), | 1320 | void (*work)(void *priv, bool signalled), |
1235 | void *priv); | 1321 | void *priv); |
1236 | struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); | 1322 | struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); |
1237 | extern bool nouveau_fence_signalled(void *obj, void *arg); | 1323 | |
1238 | extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); | 1324 | extern bool __nouveau_fence_signalled(void *obj, void *arg); |
1325 | extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); | ||
1326 | extern int __nouveau_fence_flush(void *obj, void *arg); | ||
1327 | extern void __nouveau_fence_unref(void **obj); | ||
1328 | extern void *__nouveau_fence_ref(void *obj); | ||
1329 | |||
1330 | static inline bool nouveau_fence_signalled(struct nouveau_fence *obj) | ||
1331 | { | ||
1332 | return __nouveau_fence_signalled(obj, NULL); | ||
1333 | } | ||
1334 | static inline int | ||
1335 | nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr) | ||
1336 | { | ||
1337 | return __nouveau_fence_wait(obj, NULL, lazy, intr); | ||
1338 | } | ||
1239 | extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); | 1339 | extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); |
1240 | extern int nouveau_fence_flush(void *obj, void *arg); | 1340 | static inline int nouveau_fence_flush(struct nouveau_fence *obj) |
1241 | extern void nouveau_fence_unref(void **obj); | 1341 | { |
1242 | extern void *nouveau_fence_ref(void *obj); | 1342 | return __nouveau_fence_flush(obj, NULL); |
1343 | } | ||
1344 | static inline void nouveau_fence_unref(struct nouveau_fence **obj) | ||
1345 | { | ||
1346 | __nouveau_fence_unref((void **)obj); | ||
1347 | } | ||
1348 | static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj) | ||
1349 | { | ||
1350 | return __nouveau_fence_ref(obj); | ||
1351 | } | ||
1243 | 1352 | ||
1244 | /* nouveau_gem.c */ | 1353 | /* nouveau_gem.c */ |
1245 | extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, | 1354 | extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, |
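Note: the void*-based fence entry points survive as double-underscore variants (presumably still needed as generic callbacks elsewhere) and gain typed inline wrappers, turning the old runtime convention into compile-time type checking. Typical waiter code under the new names, as a sketch:

    struct nouveau_fence *fence = NULL;

    /* ... fence obtained from a submission ... */
    if (!nouveau_fence_signalled(fence))
            ret = nouveau_fence_wait(fence, true, false); /* lazy, non-intr */
    nouveau_fence_unref(&fence); /* drops our reference */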
@@ -1259,15 +1368,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, | |||
1259 | extern int nouveau_gem_ioctl_info(struct drm_device *, void *, | 1368 | extern int nouveau_gem_ioctl_info(struct drm_device *, void *, |
1260 | struct drm_file *); | 1369 | struct drm_file *); |
1261 | 1370 | ||
1371 | /* nouveau_display.c */ | ||
1372 | int nouveau_vblank_enable(struct drm_device *dev, int crtc); | ||
1373 | void nouveau_vblank_disable(struct drm_device *dev, int crtc); | ||
1374 | int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
1375 | struct drm_pending_vblank_event *event); | ||
1376 | int nouveau_finish_page_flip(struct nouveau_channel *, | ||
1377 | struct nouveau_page_flip_state *); | ||
1378 | |||
1262 | /* nv10_gpio.c */ | 1379 | /* nv10_gpio.c */ |
1263 | int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | 1380 | int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); |
1264 | int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | 1381 | int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); |
1265 | 1382 | ||
1266 | /* nv50_gpio.c */ | 1383 | /* nv50_gpio.c */ |
1267 | int nv50_gpio_init(struct drm_device *dev); | 1384 | int nv50_gpio_init(struct drm_device *dev); |
1385 | void nv50_gpio_fini(struct drm_device *dev); | ||
1268 | int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | 1386 | int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); |
1269 | int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | 1387 | int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); |
1270 | void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); | 1388 | int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, |
1389 | void (*)(void *, int), void *); | ||
1390 | void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, | ||
1391 | void (*)(void *, int), void *); | ||
1392 | bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); | ||
1271 | 1393 | ||
1272 | /* nv50_calc. */ | 1394 | /* nv50_calc. */ |
1273 | int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, | 1395 | int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, |
@@ -1334,7 +1456,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) | |||
1334 | } | 1456 | } |
1335 | 1457 | ||
1336 | #define nv_wait(dev, reg, mask, val) \ | 1458 | #define nv_wait(dev, reg, mask, val) \ |
1337 | nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) | 1459 | nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val)) |
1460 | #define nv_wait_ne(dev, reg, mask, val) \ | ||
1461 | nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val)) | ||
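
The old catch-all nouveau_wait_until() is split into nouveau_wait_eq() and a new nouveau_wait_ne(), so callers can poll for a masked register value either reaching or leaving a target, both with the same 2-second (2000000000 ns) timeout. A user-space sketch of such a polling helper, assuming an rd32() register accessor:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

extern uint32_t rd32(unsigned reg);           /* assumed register accessor */

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Poll until (rd32(reg) & mask) == val (eq=true) or != val (eq=false),
 * giving up after timeout_ns, like nouveau_wait_eq()/nouveau_wait_ne(). */
static bool wait_cond(uint64_t timeout_ns, unsigned reg,
                      uint32_t mask, uint32_t val, bool eq)
{
        uint64_t deadline = now_ns() + timeout_ns;

        do {
                if (((rd32(reg) & mask) == val) == eq)
                        return true;
        } while (now_ns() < deadline);
        return false;
}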
1338 | 1462 | ||
1339 | /* PRAMIN access */ | 1463 | /* PRAMIN access */ |
1340 | static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) | 1464 | static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) |
@@ -1447,6 +1571,23 @@ nv_match_device(struct drm_device *dev, unsigned device, | |||
1447 | dev->pdev->subsystem_device == sub_device; | 1571 | dev->pdev->subsystem_device == sub_device; |
1448 | } | 1572 | } |
1449 | 1573 | ||
1574 | /* memory type/access flags, do not match hardware values */ | ||
1575 | #define NV_MEM_ACCESS_RO 1 | ||
1576 | #define NV_MEM_ACCESS_WO 2 | ||
1577 | #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) | ||
1578 | #define NV_MEM_ACCESS_SYS 4 | ||
1579 | #define NV_MEM_ACCESS_VM 8 | ||
1580 | |||
1581 | #define NV_MEM_TARGET_VRAM 0 | ||
1582 | #define NV_MEM_TARGET_PCI 1 | ||
1583 | #define NV_MEM_TARGET_PCI_NOSNOOP 2 | ||
1584 | #define NV_MEM_TARGET_VM 3 | ||
1585 | #define NV_MEM_TARGET_GART 4 | ||
1586 | |||
1587 | #define NV_MEM_TYPE_VM 0x7f | ||
1588 | #define NV_MEM_COMP_VM 0x03 | ||
1589 | |||
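These flags are explicitly driver-internal ("do not match hardware values"); note that NV_MEM_ACCESS_RW is simply the OR of the RO and WO bits, so permission checks reduce to mask tests, and each backend must translate the abstract target into its own encoding. A purely hypothetical translation helper, only to illustrate that split (the values on the right are invented, not real chipset encodings):

#include <errno.h>

/* Hypothetical translation of the abstract targets above into a
 * hardware field; the encodings on the right are invented for
 * illustration and are NOT real chipset values. */
static int target_to_hw(int target, unsigned *hw)
{
        switch (target) {
        case NV_MEM_TARGET_VRAM:        *hw = 0x0; return 0;
        case NV_MEM_TARGET_PCI:         *hw = 0x2; return 0;
        case NV_MEM_TARGET_PCI_NOSNOOP: *hw = 0x3; return 0;
        case NV_MEM_TARGET_GART:        *hw = 0x4; return 0;
        default:                        return -EINVAL;
        }
}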
1590 | /* NV_SW object class */ | ||
1450 | #define NV_SW 0x0000506e | 1591 | #define NV_SW 0x0000506e |
1451 | #define NV_SW_DMA_SEMAPHORE 0x00000060 | 1592 | #define NV_SW_DMA_SEMAPHORE 0x00000060 |
1452 | #define NV_SW_SEMAPHORE_OFFSET 0x00000064 | 1593 | #define NV_SW_SEMAPHORE_OFFSET 0x00000064 |
@@ -1457,5 +1598,6 @@ nv_match_device(struct drm_device *dev, unsigned device, | |||
1457 | #define NV_SW_VBLSEM_OFFSET 0x00000400 | 1598 | #define NV_SW_VBLSEM_OFFSET 0x00000400 |
1458 | #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 | 1599 | #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 |
1459 | #define NV_SW_VBLSEM_RELEASE 0x00000408 | 1600 | #define NV_SW_VBLSEM_RELEASE 0x00000408 |
1601 | #define NV_SW_PAGE_FLIP 0x00000500 | ||
1460 | 1602 | ||
1461 | #endif /* __NOUVEAU_DRV_H__ */ | 1603 | #endif /* __NOUVEAU_DRV_H__ */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1fd4845..ea861c915149 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,96 @@ | |||
49 | #include "nouveau_fbcon.h" | 49 | #include "nouveau_fbcon.h" |
50 | #include "nouveau_dma.h" | 50 | #include "nouveau_dma.h" |
51 | 51 | ||
52 | static void | ||
53 | nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | ||
54 | { | ||
55 | struct nouveau_fbdev *nfbdev = info->par; | ||
56 | struct drm_device *dev = nfbdev->dev; | ||
57 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
58 | int ret; | ||
59 | |||
60 | if (info->state != FBINFO_STATE_RUNNING) | ||
61 | return; | ||
62 | |||
63 | ret = -ENODEV; | ||
64 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && | ||
65 | mutex_trylock(&dev_priv->channel->mutex)) { | ||
66 | if (dev_priv->card_type < NV_50) | ||
67 | ret = nv04_fbcon_fillrect(info, rect); | ||
68 | else | ||
69 | if (dev_priv->card_type < NV_C0) | ||
70 | ret = nv50_fbcon_fillrect(info, rect); | ||
71 | mutex_unlock(&dev_priv->channel->mutex); | ||
72 | } | ||
73 | |||
74 | if (ret == 0) | ||
75 | return; | ||
76 | |||
77 | if (ret != -ENODEV) | ||
78 | nouveau_fbcon_gpu_lockup(info); | ||
79 | cfb_fillrect(info, rect); | ||
80 | } | ||
81 | |||
82 | static void | ||
83 | nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) | ||
84 | { | ||
85 | struct nouveau_fbdev *nfbdev = info->par; | ||
86 | struct drm_device *dev = nfbdev->dev; | ||
87 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
88 | int ret; | ||
89 | |||
90 | if (info->state != FBINFO_STATE_RUNNING) | ||
91 | return; | ||
92 | |||
93 | ret = -ENODEV; | ||
94 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && | ||
95 | mutex_trylock(&dev_priv->channel->mutex)) { | ||
96 | if (dev_priv->card_type < NV_50) | ||
97 | ret = nv04_fbcon_copyarea(info, image); | ||
98 | else | ||
99 | if (dev_priv->card_type < NV_C0) | ||
100 | ret = nv50_fbcon_copyarea(info, image); | ||
101 | mutex_unlock(&dev_priv->channel->mutex); | ||
102 | } | ||
103 | |||
104 | if (ret == 0) | ||
105 | return; | ||
106 | |||
107 | if (ret != -ENODEV) | ||
108 | nouveau_fbcon_gpu_lockup(info); | ||
109 | cfb_copyarea(info, image); | ||
110 | } | ||
111 | |||
112 | static void | ||
113 | nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | ||
114 | { | ||
115 | struct nouveau_fbdev *nfbdev = info->par; | ||
116 | struct drm_device *dev = nfbdev->dev; | ||
117 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
118 | int ret; | ||
119 | |||
120 | if (info->state != FBINFO_STATE_RUNNING) | ||
121 | return; | ||
122 | |||
123 | ret = -ENODEV; | ||
124 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && | ||
125 | mutex_trylock(&dev_priv->channel->mutex)) { | ||
126 | if (dev_priv->card_type < NV_50) | ||
127 | ret = nv04_fbcon_imageblit(info, image); | ||
128 | else | ||
129 | if (dev_priv->card_type < NV_C0) | ||
130 | ret = nv50_fbcon_imageblit(info, image); | ||
131 | mutex_unlock(&dev_priv->channel->mutex); | ||
132 | } | ||
133 | |||
134 | if (ret == 0) | ||
135 | return; | ||
136 | |||
137 | if (ret != -ENODEV) | ||
138 | nouveau_fbcon_gpu_lockup(info); | ||
139 | cfb_imageblit(info, image); | ||
140 | } | ||
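
All three wrappers above share one control-flow shape: refuse acceleration in interrupt context, take the channel mutex only via mutex_trylock(), attempt the generation-specific accelerated path (NV04-class below NV_50, NV50-class below NV_C0), and on any failure other than -ENODEV declare a GPU lockup; in every failure case the software cfb_*() routine finishes the job. A condensed user-space model of that flow, using pthreads and illustrative names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t chan_mutex = PTHREAD_MUTEX_INITIALIZER;

extern bool in_atomic_ctx(void);     /* assumed: models in_interrupt() */
extern int  hw_fillrect(void);       /* assumed accelerated path */
extern void sw_fillrect(void);       /* assumed software fallback */
extern void mark_gpu_lockup(void);   /* assumed lockup bookkeeping */

static void fillrect(void)
{
        int ret = -ENODEV;           /* "couldn't even try" by default */

        /* Never block in atomic context; take the lock opportunistically. */
        if (!in_atomic_ctx() && pthread_mutex_trylock(&chan_mutex) == 0) {
                ret = hw_fillrect();
                pthread_mutex_unlock(&chan_mutex);
        }

        if (ret == 0)
                return;              /* accelerated path did the work */

        if (ret != -ENODEV)          /* a real failure: stop trusting the GPU */
                mark_gpu_lockup();
        sw_fillrect();               /* software rendering always completes */
}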
141 | |||
52 | static int | 142 | static int |
53 | nouveau_fbcon_sync(struct fb_info *info) | 143 | nouveau_fbcon_sync(struct fb_info *info) |
54 | { | 144 | { |
@@ -58,12 +148,17 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
58 | struct nouveau_channel *chan = dev_priv->channel; | 148 | struct nouveau_channel *chan = dev_priv->channel; |
59 | int ret, i; | 149 | int ret, i; |
60 | 150 | ||
61 | if (!chan || !chan->accel_done || | 151 | if (!chan || !chan->accel_done || in_interrupt() || |
62 | info->state != FBINFO_STATE_RUNNING || | 152 | info->state != FBINFO_STATE_RUNNING || |
63 | info->flags & FBINFO_HWACCEL_DISABLED) | 153 | info->flags & FBINFO_HWACCEL_DISABLED) |
64 | return 0; | 154 | return 0; |
65 | 155 | ||
66 | if (RING_SPACE(chan, 4)) { | 156 | if (!mutex_trylock(&chan->mutex)) |
157 | return 0; | ||
158 | |||
159 | ret = RING_SPACE(chan, 4); | ||
160 | if (ret) { | ||
161 | mutex_unlock(&chan->mutex); | ||
67 | nouveau_fbcon_gpu_lockup(info); | 162 | nouveau_fbcon_gpu_lockup(info); |
68 | return 0; | 163 | return 0; |
69 | } | 164 | } |
@@ -74,6 +169,7 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
74 | OUT_RING(chan, 0); | 169 | OUT_RING(chan, 0); |
75 | nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); | 170 | nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); |
76 | FIRE_RING(chan); | 171 | FIRE_RING(chan); |
172 | mutex_unlock(&chan->mutex); | ||
77 | 173 | ||
78 | ret = -EBUSY; | 174 | ret = -EBUSY; |
79 | for (i = 0; i < 100000; i++) { | 175 | for (i = 0; i < 100000; i++) { |
@@ -97,9 +193,9 @@ static struct fb_ops nouveau_fbcon_ops = { | |||
97 | .owner = THIS_MODULE, | 193 | .owner = THIS_MODULE, |
98 | .fb_check_var = drm_fb_helper_check_var, | 194 | .fb_check_var = drm_fb_helper_check_var, |
99 | .fb_set_par = drm_fb_helper_set_par, | 195 | .fb_set_par = drm_fb_helper_set_par, |
100 | .fb_fillrect = cfb_fillrect, | 196 | .fb_fillrect = nouveau_fbcon_fillrect, |
101 | .fb_copyarea = cfb_copyarea, | 197 | .fb_copyarea = nouveau_fbcon_copyarea, |
102 | .fb_imageblit = cfb_imageblit, | 198 | .fb_imageblit = nouveau_fbcon_imageblit, |
103 | .fb_sync = nouveau_fbcon_sync, | 199 | .fb_sync = nouveau_fbcon_sync, |
104 | .fb_pan_display = drm_fb_helper_pan_display, | 200 | .fb_pan_display = drm_fb_helper_pan_display, |
105 | .fb_blank = drm_fb_helper_blank, | 201 | .fb_blank = drm_fb_helper_blank, |
@@ -108,29 +204,13 @@ static struct fb_ops nouveau_fbcon_ops = { | |||
108 | .fb_debug_leave = drm_fb_helper_debug_leave, | 204 | .fb_debug_leave = drm_fb_helper_debug_leave, |
109 | }; | 205 | }; |
110 | 206 | ||
111 | static struct fb_ops nv04_fbcon_ops = { | 207 | static struct fb_ops nouveau_fbcon_sw_ops = { |
112 | .owner = THIS_MODULE, | 208 | .owner = THIS_MODULE, |
113 | .fb_check_var = drm_fb_helper_check_var, | 209 | .fb_check_var = drm_fb_helper_check_var, |
114 | .fb_set_par = drm_fb_helper_set_par, | 210 | .fb_set_par = drm_fb_helper_set_par, |
115 | .fb_fillrect = nv04_fbcon_fillrect, | 211 | .fb_fillrect = cfb_fillrect, |
116 | .fb_copyarea = nv04_fbcon_copyarea, | 212 | .fb_copyarea = cfb_copyarea, |
117 | .fb_imageblit = nv04_fbcon_imageblit, | 213 | .fb_imageblit = cfb_imageblit, |
118 | .fb_sync = nouveau_fbcon_sync, | ||
119 | .fb_pan_display = drm_fb_helper_pan_display, | ||
120 | .fb_blank = drm_fb_helper_blank, | ||
121 | .fb_setcmap = drm_fb_helper_setcmap, | ||
122 | .fb_debug_enter = drm_fb_helper_debug_enter, | ||
123 | .fb_debug_leave = drm_fb_helper_debug_leave, | ||
124 | }; | ||
125 | |||
126 | static struct fb_ops nv50_fbcon_ops = { | ||
127 | .owner = THIS_MODULE, | ||
128 | .fb_check_var = drm_fb_helper_check_var, | ||
129 | .fb_set_par = drm_fb_helper_set_par, | ||
130 | .fb_fillrect = nv50_fbcon_fillrect, | ||
131 | .fb_copyarea = nv50_fbcon_copyarea, | ||
132 | .fb_imageblit = nv50_fbcon_imageblit, | ||
133 | .fb_sync = nouveau_fbcon_sync, | ||
134 | .fb_pan_display = drm_fb_helper_pan_display, | 214 | .fb_pan_display = drm_fb_helper_pan_display, |
135 | .fb_blank = drm_fb_helper_blank, | 215 | .fb_blank = drm_fb_helper_blank, |
136 | .fb_setcmap = drm_fb_helper_setcmap, | 216 | .fb_setcmap = drm_fb_helper_setcmap, |
@@ -257,9 +337,9 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
257 | FBINFO_HWACCEL_FILLRECT | | 337 | FBINFO_HWACCEL_FILLRECT | |
258 | FBINFO_HWACCEL_IMAGEBLIT; | 338 | FBINFO_HWACCEL_IMAGEBLIT; |
259 | info->flags |= FBINFO_CAN_FORCE_OUTPUT; | 339 | info->flags |= FBINFO_CAN_FORCE_OUTPUT; |
260 | info->fbops = &nouveau_fbcon_ops; | 340 | info->fbops = &nouveau_fbcon_sw_ops; |
261 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - | 341 | info->fix.smem_start = dev->mode_config.fb_base + |
262 | dev_priv->vm_vram_base; | 342 | (nvbo->bo.mem.start << PAGE_SHIFT); |
263 | info->fix.smem_len = size; | 343 | info->fix.smem_len = size; |
264 | 344 | ||
265 | info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); | 345 | info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); |
@@ -285,19 +365,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
285 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | 365 | info->pixmap.flags = FB_PIXMAP_SYSTEM; |
286 | info->pixmap.scan_align = 1; | 366 | info->pixmap.scan_align = 1; |
287 | 367 | ||
368 | mutex_unlock(&dev->struct_mutex); | ||
369 | |||
288 | if (dev_priv->channel && !nouveau_nofbaccel) { | 370 | if (dev_priv->channel && !nouveau_nofbaccel) { |
289 | switch (dev_priv->card_type) { | 371 | ret = -ENODEV; |
290 | case NV_C0: | 372 | if (dev_priv->card_type < NV_50) |
291 | break; | 373 | ret = nv04_fbcon_accel_init(info); |
292 | case NV_50: | 374 | else |
293 | nv50_fbcon_accel_init(info); | 375 | if (dev_priv->card_type < NV_C0) |
294 | info->fbops = &nv50_fbcon_ops; | 376 | ret = nv50_fbcon_accel_init(info); |
295 | break; | 377 | |
296 | default: | 378 | if (ret == 0) |
297 | nv04_fbcon_accel_init(info); | 379 | info->fbops = &nouveau_fbcon_ops; |
298 | info->fbops = &nv04_fbcon_ops; | ||
299 | break; | ||
300 | }; | ||
301 | } | 380 | } |
302 | 381 | ||
303 | nouveau_fbcon_zfill(dev, nfbdev); | 382 | nouveau_fbcon_zfill(dev, nfbdev); |
@@ -308,7 +387,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
308 | nouveau_fb->base.height, | 387 | nouveau_fb->base.height, |
309 | nvbo->bo.offset, nvbo); | 388 | nvbo->bo.offset, nvbo); |
310 | 389 | ||
311 | mutex_unlock(&dev->struct_mutex); | ||
312 | vga_switcheroo_client_fb_set(dev->pdev, info); | 390 | vga_switcheroo_client_fb_set(dev->pdev, info); |
313 | return 0; | 391 | return 0; |
314 | 392 | ||
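Note the inversion in the accel setup above: the framebuffer now starts on nouveau_fbcon_sw_ops and is only switched to the accelerated table when the per-generation *_fbcon_accel_init() succeeds; Fermi (NV_C0+) has no accelerated path yet and simply stays on software ops. The hunk also drops struct_mutex before accel init and derives smem_start from the buffer's placement rather than a VM offset. A sketch of the fallback-first selection (names and values illustrative):

#include <errno.h>

enum { NV_50 = 0x50, NV_C0 = 0xc0 };           /* illustrative values */

struct fb { const void *fbops; };

extern const void *sw_ops, *accel_ops;         /* assumed ops tables */
extern int nv04_accel_init(struct fb *);       /* assumed per-gen init */
extern int nv50_accel_init(struct fb *);

static void pick_fbops(struct fb *info, int card_type)
{
        int ret = -ENODEV;

        info->fbops = sw_ops;                   /* safe default first */
        if (card_type < NV_50)
                ret = nv04_accel_init(info);
        else if (card_type < NV_C0)
                ret = nv50_accel_init(info);
        /* NV_C0+ (Fermi): no accelerated path yet, keep sw_ops */

        if (ret == 0)
                info->fbops = accel_ops;        /* upgrade only on success */
}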
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e12684c37e..6b933f2c3a5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,13 +40,13 @@ struct nouveau_fbdev { | |||
40 | 40 | ||
41 | void nouveau_fbcon_restore(void); | 41 | void nouveau_fbcon_restore(void); |
42 | 42 | ||
43 | void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | 43 | int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); |
44 | void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | 44 | int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
45 | void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | 45 | int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); |
46 | int nv04_fbcon_accel_init(struct fb_info *info); | 46 | int nv04_fbcon_accel_init(struct fb_info *info); |
47 | void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | 47 | int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
48 | void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | 48 | int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); |
49 | void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | 49 | int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); |
50 | int nv50_fbcon_accel_init(struct fb_info *info); | 50 | int nv50_fbcon_accel_init(struct fb_info *info); |
51 | 51 | ||
52 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); | 52 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266e..374a9793b85f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,6 +64,7 @@ nouveau_fence_del(struct kref *ref) | |||
64 | struct nouveau_fence *fence = | 64 | struct nouveau_fence *fence = |
65 | container_of(ref, struct nouveau_fence, refcount); | 65 | container_of(ref, struct nouveau_fence, refcount); |
66 | 66 | ||
67 | nouveau_channel_ref(NULL, &fence->channel); | ||
67 | kfree(fence); | 68 | kfree(fence); |
68 | } | 69 | } |
69 | 70 | ||
@@ -76,14 +77,17 @@ nouveau_fence_update(struct nouveau_channel *chan) | |||
76 | 77 | ||
77 | spin_lock(&chan->fence.lock); | 78 | spin_lock(&chan->fence.lock); |
78 | 79 | ||
79 | if (USE_REFCNT(dev)) | 80 | /* Fetch the last sequence if the channel is still up and running */ |
80 | sequence = nvchan_rd32(chan, 0x48); | 81 | if (likely(!list_empty(&chan->fence.pending))) { |
81 | else | 82 | if (USE_REFCNT(dev)) |
82 | sequence = atomic_read(&chan->fence.last_sequence_irq); | 83 | sequence = nvchan_rd32(chan, 0x48); |
84 | else | ||
85 | sequence = atomic_read(&chan->fence.last_sequence_irq); | ||
83 | 86 | ||
84 | if (chan->fence.sequence_ack == sequence) | 87 | if (chan->fence.sequence_ack == sequence) |
85 | goto out; | 88 | goto out; |
86 | chan->fence.sequence_ack = sequence; | 89 | chan->fence.sequence_ack = sequence; |
90 | } | ||
87 | 91 | ||
88 | list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { | 92 | list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { |
89 | sequence = fence->sequence; | 93 | sequence = fence->sequence; |
@@ -113,13 +117,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, | |||
113 | if (!fence) | 117 | if (!fence) |
114 | return -ENOMEM; | 118 | return -ENOMEM; |
115 | kref_init(&fence->refcount); | 119 | kref_init(&fence->refcount); |
116 | fence->channel = chan; | 120 | nouveau_channel_ref(chan, &fence->channel); |
117 | 121 | ||
118 | if (emit) | 122 | if (emit) |
119 | ret = nouveau_fence_emit(fence); | 123 | ret = nouveau_fence_emit(fence); |
120 | 124 | ||
121 | if (ret) | 125 | if (ret) |
122 | nouveau_fence_unref((void *)&fence); | 126 | nouveau_fence_unref(&fence); |
123 | *pfence = fence; | 127 | *pfence = fence; |
124 | return ret; | 128 | return ret; |
125 | } | 129 | } |
@@ -127,7 +131,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, | |||
127 | struct nouveau_channel * | 131 | struct nouveau_channel * |
128 | nouveau_fence_channel(struct nouveau_fence *fence) | 132 | nouveau_fence_channel(struct nouveau_fence *fence) |
129 | { | 133 | { |
130 | return fence ? fence->channel : NULL; | 134 | return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL; |
131 | } | 135 | } |
132 | 136 | ||
133 | int | 137 | int |
@@ -182,7 +186,7 @@ nouveau_fence_work(struct nouveau_fence *fence, | |||
182 | } | 186 | } |
183 | 187 | ||
184 | void | 188 | void |
185 | nouveau_fence_unref(void **sync_obj) | 189 | __nouveau_fence_unref(void **sync_obj) |
186 | { | 190 | { |
187 | struct nouveau_fence *fence = nouveau_fence(*sync_obj); | 191 | struct nouveau_fence *fence = nouveau_fence(*sync_obj); |
188 | 192 | ||
@@ -192,7 +196,7 @@ nouveau_fence_unref(void **sync_obj) | |||
192 | } | 196 | } |
193 | 197 | ||
194 | void * | 198 | void * |
195 | nouveau_fence_ref(void *sync_obj) | 199 | __nouveau_fence_ref(void *sync_obj) |
196 | { | 200 | { |
197 | struct nouveau_fence *fence = nouveau_fence(sync_obj); | 201 | struct nouveau_fence *fence = nouveau_fence(sync_obj); |
198 | 202 | ||
@@ -201,7 +205,7 @@ nouveau_fence_ref(void *sync_obj) | |||
201 | } | 205 | } |
202 | 206 | ||
203 | bool | 207 | bool |
204 | nouveau_fence_signalled(void *sync_obj, void *sync_arg) | 208 | __nouveau_fence_signalled(void *sync_obj, void *sync_arg) |
205 | { | 209 | { |
206 | struct nouveau_fence *fence = nouveau_fence(sync_obj); | 210 | struct nouveau_fence *fence = nouveau_fence(sync_obj); |
207 | struct nouveau_channel *chan = fence->channel; | 211 | struct nouveau_channel *chan = fence->channel; |
@@ -214,13 +218,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) | |||
214 | } | 218 | } |
215 | 219 | ||
216 | int | 220 | int |
217 | nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) | 221 | __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) |
218 | { | 222 | { |
219 | unsigned long timeout = jiffies + (3 * DRM_HZ); | 223 | unsigned long timeout = jiffies + (3 * DRM_HZ); |
224 | unsigned long sleep_time = jiffies + 1; | ||
220 | int ret = 0; | 225 | int ret = 0; |
221 | 226 | ||
222 | while (1) { | 227 | while (1) { |
223 | if (nouveau_fence_signalled(sync_obj, sync_arg)) | 228 | if (__nouveau_fence_signalled(sync_obj, sync_arg)) |
224 | break; | 229 | break; |
225 | 230 | ||
226 | if (time_after_eq(jiffies, timeout)) { | 231 | if (time_after_eq(jiffies, timeout)) { |
@@ -230,7 +235,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) | |||
230 | 235 | ||
231 | __set_current_state(intr ? TASK_INTERRUPTIBLE | 236 | __set_current_state(intr ? TASK_INTERRUPTIBLE |
232 | : TASK_UNINTERRUPTIBLE); | 237 | : TASK_UNINTERRUPTIBLE); |
233 | if (lazy) | 238 | if (lazy && time_after_eq(jiffies, sleep_time)) |
234 | schedule_timeout(1); | 239 | schedule_timeout(1); |
235 | 240 | ||
236 | if (intr && signal_pending(current)) { | 241 | if (intr && signal_pending(current)) { |
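
__nouveau_fence_wait() keeps its 3-second deadline but now busy-polls for roughly the first jiffy and only starts yielding through schedule_timeout(1) once sleep_time has passed, trading a brief CPU spin for much lower latency on fences that signal almost immediately. A user-space model of the spin-then-sleep lazy wait (names and time units illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

extern bool fence_signalled(void);          /* assumed completion check */

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Spin first, then sleep-poll until signalled or a 3s deadline passes;
 * models the lazy path of __nouveau_fence_wait(). */
static int fence_wait(void)
{
        uint64_t timeout    = now_ms() + 3000;  /* 3 * DRM_HZ analogue */
        uint64_t sleep_time = now_ms() + 10;    /* ~one jiffy of spinning */

        while (!fence_signalled()) {
                if (now_ms() >= timeout)
                        return -EBUSY;
                if (now_ms() >= sleep_time)     /* past the spin window: yield */
                        usleep(1000);
                /* else: busy-wait for minimum wakeup latency */
        }
        return 0;
}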
@@ -368,7 +373,7 @@ emit_semaphore(struct nouveau_channel *chan, int method, | |||
368 | 373 | ||
369 | kref_get(&sema->ref); | 374 | kref_get(&sema->ref); |
370 | nouveau_fence_work(fence, semaphore_work, sema); | 375 | nouveau_fence_work(fence, semaphore_work, sema); |
371 | nouveau_fence_unref((void *)&fence); | 376 | nouveau_fence_unref(&fence); |
372 | 377 | ||
373 | return 0; | 378 | return 0; |
374 | } | 379 | } |
@@ -380,33 +385,49 @@ nouveau_fence_sync(struct nouveau_fence *fence, | |||
380 | struct nouveau_channel *chan = nouveau_fence_channel(fence); | 385 | struct nouveau_channel *chan = nouveau_fence_channel(fence); |
381 | struct drm_device *dev = wchan->dev; | 386 | struct drm_device *dev = wchan->dev; |
382 | struct nouveau_semaphore *sema; | 387 | struct nouveau_semaphore *sema; |
383 | int ret; | 388 | int ret = 0; |
384 | 389 | ||
385 | if (likely(!fence || chan == wchan || | 390 | if (likely(!chan || chan == wchan || |
386 | nouveau_fence_signalled(fence, NULL))) | 391 | nouveau_fence_signalled(fence))) |
387 | return 0; | 392 | goto out; |
388 | 393 | ||
389 | sema = alloc_semaphore(dev); | 394 | sema = alloc_semaphore(dev); |
390 | if (!sema) { | 395 | if (!sema) { |
391 | /* Early card or broken userspace, fall back to | 396 | /* Early card or broken userspace, fall back to |
392 | * software sync. */ | 397 | * software sync. */ |
393 | return nouveau_fence_wait(fence, NULL, false, false); | 398 | ret = nouveau_fence_wait(fence, true, false); |
399 | goto out; | ||
400 | } | ||
401 | |||
402 | /* try to take chan's mutex, if we can't take it right away | ||
403 | * we have to fallback to software sync to prevent locking | ||
404 | * order issues | ||
405 | */ | ||
406 | if (!mutex_trylock(&chan->mutex)) { | ||
407 | ret = nouveau_fence_wait(fence, true, false); | ||
408 | goto out_unref; | ||
394 | } | 409 | } |
395 | 410 | ||
396 | /* Make wchan wait until it gets signalled */ | 411 | /* Make wchan wait until it gets signalled */ |
397 | ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); | 412 | ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); |
398 | if (ret) | 413 | if (ret) |
399 | goto out; | 414 | goto out_unlock; |
400 | 415 | ||
401 | /* Signal the semaphore from chan */ | 416 | /* Signal the semaphore from chan */ |
402 | ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); | 417 | ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); |
403 | out: | 418 | |
419 | out_unlock: | ||
420 | mutex_unlock(&chan->mutex); | ||
421 | out_unref: | ||
404 | kref_put(&sema->ref, free_semaphore); | 422 | kref_put(&sema->ref, free_semaphore); |
423 | out: | ||
424 | if (chan) | ||
425 | nouveau_channel_put_unlocked(&chan); | ||
405 | return ret; | 426 | return ret; |
406 | } | 427 | } |
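
nouveau_fence_sync() now takes the signalling channel's mutex with mutex_trylock() only; blocking on it while the holder might itself be waiting on wchan could form a lock-ordering cycle, so a contended lock simply degrades to a CPU-side nouveau_fence_wait() instead of the GPU semaphore fast path. The trylock-or-degrade idiom in a minimal pthreads sketch:

#include <pthread.h>

static pthread_mutex_t chan_mutex = PTHREAD_MUTEX_INITIALIZER;

extern int gpu_semaphore_sync(void);   /* assumed fast path */
extern int cpu_wait_sync(void);        /* assumed safe, slower fallback */

/* Take the lock opportunistically and never wait for it, so no
 * lock-ordering cycle through this path is possible. */
static int sync_fence(void)
{
        int ret;

        if (pthread_mutex_trylock(&chan_mutex) != 0)
                return cpu_wait_sync();        /* contended: degrade */

        ret = gpu_semaphore_sync();            /* uncontended fast path */
        pthread_mutex_unlock(&chan_mutex);
        return ret;
}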
407 | 428 | ||
408 | int | 429 | int |
409 | nouveau_fence_flush(void *sync_obj, void *sync_arg) | 430 | __nouveau_fence_flush(void *sync_obj, void *sync_arg) |
410 | { | 431 | { |
411 | return 0; | 432 | return 0; |
412 | } | 433 | } |
@@ -420,12 +441,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
420 | int ret; | 441 | int ret; |
421 | 442 | ||
422 | /* Create an NV_SW object for various sync purposes */ | 443 | /* Create an NV_SW object for various sync purposes */ |
423 | ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); | 444 | ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW); |
424 | if (ret) | ||
425 | return ret; | ||
426 | |||
427 | ret = nouveau_ramht_insert(chan, NvSw, obj); | ||
428 | nouveau_gpuobj_ref(NULL, &obj); | ||
429 | if (ret) | 445 | if (ret) |
430 | return ret; | 446 | return ret; |
431 | 447 | ||
@@ -437,13 +453,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
437 | 453 | ||
438 | /* Create a DMA object for the shared cross-channel sync area. */ | 454 | /* Create a DMA object for the shared cross-channel sync area. */ |
439 | if (USE_SEMA(dev)) { | 455 | if (USE_SEMA(dev)) { |
440 | struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node; | 456 | struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; |
441 | 457 | ||
442 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 458 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
443 | mem->start << PAGE_SHIFT, | 459 | mem->start << PAGE_SHIFT, |
444 | mem->size << PAGE_SHIFT, | 460 | mem->size, NV_MEM_ACCESS_RW, |
445 | NV_DMA_ACCESS_RW, | 461 | NV_MEM_TARGET_VRAM, &obj); |
446 | NV_DMA_TARGET_VIDMEM, &obj); | ||
447 | if (ret) | 462 | if (ret) |
448 | return ret; | 463 | return ret; |
449 | 464 | ||
@@ -473,6 +488,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) | |||
473 | { | 488 | { |
474 | struct nouveau_fence *tmp, *fence; | 489 | struct nouveau_fence *tmp, *fence; |
475 | 490 | ||
491 | spin_lock(&chan->fence.lock); | ||
492 | |||
476 | list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { | 493 | list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { |
477 | fence->signalled = true; | 494 | fence->signalled = true; |
478 | list_del(&fence->entry); | 495 | list_del(&fence->entry); |
@@ -482,6 +499,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) | |||
482 | 499 | ||
483 | kref_put(&fence->refcount, nouveau_fence_del); | 500 | kref_put(&fence->refcount, nouveau_fence_del); |
484 | } | 501 | } |
502 | |||
503 | spin_unlock(&chan->fence.lock); | ||
485 | } | 504 | } |
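
nouveau_fence_channel_fini() now drains the pending list under chan->fence.lock, since reference-counted channels mean fences can be observed concurrently during teardown; each fence is marked signalled, unlinked, and has its reference dropped. A small user-space sketch of draining a list under a spinlock (assuming the lock was initialised elsewhere):

#include <pthread.h>
#include <stdlib.h>

struct fence_node {
        struct fence_node *next;
        int signalled;
};

static pthread_spinlock_t fence_lock;   /* pthread_spin_init() done elsewhere */
static struct fence_node *pending;

/* Drain the pending list entirely under the lock, as the fini hunk does. */
static void drain_pending(void)
{
        pthread_spin_lock(&fence_lock);
        while (pending) {
                struct fence_node *f = pending;

                pending = f->next;
                f->signalled = 1;       /* mark complete before release */
                free(f);                /* stands in for kref_put() */
        }
        pthread_spin_unlock(&fence_lock);
}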
486 | 505 | ||
487 | int | 506 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..506c508b7eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) | |||
48 | return; | 48 | return; |
49 | nvbo->gem = NULL; | 49 | nvbo->gem = NULL; |
50 | 50 | ||
51 | if (unlikely(nvbo->cpu_filp)) | ||
52 | ttm_bo_synccpu_write_release(bo); | ||
53 | |||
54 | if (unlikely(nvbo->pin_refcnt)) { | 51 | if (unlikely(nvbo->pin_refcnt)) { |
55 | nvbo->pin_refcnt = 1; | 52 | nvbo->pin_refcnt = 1; |
56 | nouveau_bo_unpin(nvbo); | 53 | nouveau_bo_unpin(nvbo); |
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) | |||
106 | return 0; | 103 | return 0; |
107 | } | 104 | } |
108 | 105 | ||
109 | static bool | ||
110 | nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) | ||
111 | { | ||
112 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
113 | |||
114 | if (dev_priv->card_type >= NV_50) { | ||
115 | switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { | ||
116 | case 0x0000: | ||
117 | case 0x1800: | ||
118 | case 0x2800: | ||
119 | case 0x4800: | ||
120 | case 0x7000: | ||
121 | case 0x7400: | ||
122 | case 0x7a00: | ||
123 | case 0xe000: | ||
124 | return true; | ||
125 | } | ||
126 | } else { | ||
127 | if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) | ||
128 | return true; | ||
129 | } | ||
130 | |||
131 | NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); | ||
132 | return false; | ||
133 | } | ||
134 | |||
135 | int | 106 | int |
136 | nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | 107 | nouveau_gem_ioctl_new(struct drm_device *dev, void *data, |
137 | struct drm_file *file_priv) | 108 | struct drm_file *file_priv) |
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
146 | if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) | 117 | if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) |
147 | dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; | 118 | dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; |
148 | 119 | ||
149 | if (req->channel_hint) { | ||
150 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint, | ||
151 | file_priv, chan); | ||
152 | } | ||
153 | |||
154 | if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) | 120 | if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) |
155 | flags |= TTM_PL_FLAG_VRAM; | 121 | flags |= TTM_PL_FLAG_VRAM; |
156 | if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) | 122 | if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) |
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
158 | if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) | 124 | if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) |
159 | flags |= TTM_PL_FLAG_SYSTEM; | 125 | flags |= TTM_PL_FLAG_SYSTEM; |
160 | 126 | ||
161 | if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) | 127 | if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { |
128 | NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); | ||
162 | return -EINVAL; | 129 | return -EINVAL; |
130 | } | ||
131 | |||
132 | if (req->channel_hint) { | ||
133 | chan = nouveau_channel_get(dev, file_priv, req->channel_hint); | ||
134 | if (IS_ERR(chan)) | ||
135 | return PTR_ERR(chan); | ||
136 | } | ||
163 | 137 | ||
164 | ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, | 138 | ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, |
165 | req->info.tile_mode, req->info.tile_flags, false, | 139 | req->info.tile_mode, req->info.tile_flags, false, |
166 | (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), | 140 | (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), |
167 | &nvbo); | 141 | &nvbo); |
142 | if (chan) | ||
143 | nouveau_channel_put(&chan); | ||
168 | if (ret) | 144 | if (ret) |
169 | return ret; | 145 | return ret; |
170 | 146 | ||
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) | |||
231 | 207 | ||
232 | list_for_each_safe(entry, tmp, list) { | 208 | list_for_each_safe(entry, tmp, list) { |
233 | nvbo = list_entry(entry, struct nouveau_bo, entry); | 209 | nvbo = list_entry(entry, struct nouveau_bo, entry); |
234 | if (likely(fence)) { | 210 | |
235 | struct nouveau_fence *prev_fence; | 211 | nouveau_bo_fence(nvbo, fence); |
236 | |||
237 | spin_lock(&nvbo->bo.lock); | ||
238 | prev_fence = nvbo->bo.sync_obj; | ||
239 | nvbo->bo.sync_obj = nouveau_fence_ref(fence); | ||
240 | spin_unlock(&nvbo->bo.lock); | ||
241 | nouveau_fence_unref((void *)&prev_fence); | ||
242 | } | ||
243 | 212 | ||
244 | if (unlikely(nvbo->validate_mapped)) { | 213 | if (unlikely(nvbo->validate_mapped)) { |
245 | ttm_bo_kunmap(&nvbo->kmap); | 214 | ttm_bo_kunmap(&nvbo->kmap); |
@@ -299,14 +268,15 @@ retry: | |||
299 | return -EINVAL; | 268 | return -EINVAL; |
300 | } | 269 | } |
301 | 270 | ||
302 | ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence); | 271 | ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); |
303 | if (ret) { | 272 | if (ret) { |
304 | validate_fini(op, NULL); | 273 | validate_fini(op, NULL); |
305 | if (ret == -EAGAIN) | 274 | if (unlikely(ret == -EAGAIN)) |
306 | ret = ttm_bo_wait_unreserved(&nvbo->bo, false); | 275 | ret = ttm_bo_wait_unreserved(&nvbo->bo, true); |
307 | drm_gem_object_unreference_unlocked(gem); | 276 | drm_gem_object_unreference_unlocked(gem); |
308 | if (ret) { | 277 | if (unlikely(ret)) { |
309 | NV_ERROR(dev, "fail reserve\n"); | 278 | if (ret != -ERESTARTSYS) |
279 | NV_ERROR(dev, "fail reserve\n"); | ||
310 | return ret; | 280 | return ret; |
311 | } | 281 | } |
312 | goto retry; | 282 | goto retry; |
@@ -331,25 +301,6 @@ retry: | |||
331 | validate_fini(op, NULL); | 301 | validate_fini(op, NULL); |
332 | return -EINVAL; | 302 | return -EINVAL; |
333 | } | 303 | } |
334 | |||
335 | if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) { | ||
336 | validate_fini(op, NULL); | ||
337 | |||
338 | if (nvbo->cpu_filp == file_priv) { | ||
339 | NV_ERROR(dev, "bo %p mapped by process trying " | ||
340 | "to validate it!\n", nvbo); | ||
341 | return -EINVAL; | ||
342 | } | ||
343 | |||
344 | mutex_unlock(&drm_global_mutex); | ||
345 | ret = ttm_bo_wait_cpu(&nvbo->bo, false); | ||
346 | mutex_lock(&drm_global_mutex); | ||
347 | if (ret) { | ||
348 | NV_ERROR(dev, "fail wait_cpu\n"); | ||
349 | return ret; | ||
350 | } | ||
351 | goto retry; | ||
352 | } | ||
353 | } | 304 | } |
354 | 305 | ||
355 | return 0; | 306 | return 0; |
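
Buffer reservation in the validate path becomes interruptible: ttm_bo_reserve(..., true, ...) can return early on a signal, -EAGAIN triggers an interruptible wait for the buffer to become unreserved followed by a full retry, and -ERESTARTSYS is no longer logged as an error since signal delivery is an expected outcome here. The retry skeleton, sketched with assumed helpers:

#include <errno.h>

extern int try_reserve(void);        /* assumed: 0, -EAGAIN, or a signal error */
extern int wait_unreserved(void);    /* assumed interruptible wait */

/* Retry reservation until it sticks or the caller is interrupted;
 * interruption is passed up silently, as in the hunk above. */
static int reserve_retry(void)
{
        for (;;) {
                int ret = try_reserve();

                if (ret == 0)
                        return 0;            /* reserved */
                if (ret != -EAGAIN)
                        return ret;          /* signal etc.: bail quietly */
                ret = wait_unreserved();     /* contended: sleep, then retry */
                if (ret)
                        return ret;
        }
}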
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
383 | } | 334 | } |
384 | 335 | ||
385 | nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan; | 336 | nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan; |
386 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, | 337 | ret = nouveau_bo_validate(nvbo, true, false, false); |
387 | false, false, false); | ||
388 | nvbo->channel = NULL; | 338 | nvbo->channel = NULL; |
389 | if (unlikely(ret)) { | 339 | if (unlikely(ret)) { |
390 | NV_ERROR(dev, "fail ttm_validate\n"); | 340 | if (ret != -ERESTARTSYS) |
341 | NV_ERROR(dev, "fail ttm_validate\n"); | ||
391 | return ret; | 342 | return ret; |
392 | } | 343 | } |
393 | 344 | ||
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
439 | 390 | ||
440 | ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); | 391 | ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); |
441 | if (unlikely(ret)) { | 392 | if (unlikely(ret)) { |
442 | NV_ERROR(dev, "validate_init\n"); | 393 | if (ret != -ERESTARTSYS) |
394 | NV_ERROR(dev, "validate_init\n"); | ||
443 | return ret; | 395 | return ret; |
444 | } | 396 | } |
445 | 397 | ||
446 | ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); | 398 | ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); |
447 | if (unlikely(ret < 0)) { | 399 | if (unlikely(ret < 0)) { |
448 | NV_ERROR(dev, "validate vram_list\n"); | 400 | if (ret != -ERESTARTSYS) |
401 | NV_ERROR(dev, "validate vram_list\n"); | ||
449 | validate_fini(op, NULL); | 402 | validate_fini(op, NULL); |
450 | return ret; | 403 | return ret; |
451 | } | 404 | } |
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
453 | 406 | ||
454 | ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); | 407 | ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); |
455 | if (unlikely(ret < 0)) { | 408 | if (unlikely(ret < 0)) { |
456 | NV_ERROR(dev, "validate gart_list\n"); | 409 | if (ret != -ERESTARTSYS) |
410 | NV_ERROR(dev, "validate gart_list\n"); | ||
457 | validate_fini(op, NULL); | 411 | validate_fini(op, NULL); |
458 | return ret; | 412 | return ret; |
459 | } | 413 | } |
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
461 | 415 | ||
462 | ret = validate_list(chan, &op->both_list, pbbo, user_buffers); | 416 | ret = validate_list(chan, &op->both_list, pbbo, user_buffers); |
463 | if (unlikely(ret < 0)) { | 417 | if (unlikely(ret < 0)) { |
464 | NV_ERROR(dev, "validate both_list\n"); | 418 | if (ret != -ERESTARTSYS) |
419 | NV_ERROR(dev, "validate both_list\n"); | ||
465 | validate_fini(op, NULL); | 420 | validate_fini(op, NULL); |
466 | return ret; | 421 | return ret; |
467 | } | 422 | } |
@@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, | |||
557 | data |= r->vor; | 512 | data |= r->vor; |
558 | } | 513 | } |
559 | 514 | ||
560 | spin_lock(&nvbo->bo.lock); | 515 | spin_lock(&nvbo->bo.bdev->fence_lock); |
561 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); | 516 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); |
562 | spin_unlock(&nvbo->bo.lock); | 517 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
563 | if (ret) { | 518 | if (ret) { |
564 | NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); | 519 | NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); |
565 | break; | 520 | break; |
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
585 | struct nouveau_fence *fence = NULL; | 540 | struct nouveau_fence *fence = NULL; |
586 | int i, j, ret = 0, do_reloc = 0; | 541 | int i, j, ret = 0, do_reloc = 0; |
587 | 542 | ||
588 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); | 543 | chan = nouveau_channel_get(dev, file_priv, req->channel); |
544 | if (IS_ERR(chan)) | ||
545 | return PTR_ERR(chan); | ||
589 | 546 | ||
590 | req->vram_available = dev_priv->fb_aper_free; | 547 | req->vram_available = dev_priv->fb_aper_free; |
591 | req->gart_available = dev_priv->gart_info.aper_free; | 548 | req->gart_available = dev_priv->gart_info.aper_free; |
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
595 | if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { | 552 | if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { |
596 | NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", | 553 | NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", |
597 | req->nr_push, NOUVEAU_GEM_MAX_PUSH); | 554 | req->nr_push, NOUVEAU_GEM_MAX_PUSH); |
555 | nouveau_channel_put(&chan); | ||
598 | return -EINVAL; | 556 | return -EINVAL; |
599 | } | 557 | } |
600 | 558 | ||
601 | if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { | 559 | if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { |
602 | NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", | 560 | NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", |
603 | req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); | 561 | req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); |
562 | nouveau_channel_put(&chan); | ||
604 | return -EINVAL; | 563 | return -EINVAL; |
605 | } | 564 | } |
606 | 565 | ||
607 | if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { | 566 | if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { |
608 | NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", | 567 | NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", |
609 | req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); | 568 | req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); |
569 | nouveau_channel_put(&chan); | ||
610 | return -EINVAL; | 570 | return -EINVAL; |
611 | } | 571 | } |
612 | 572 | ||
613 | push = u_memcpya(req->push, req->nr_push, sizeof(*push)); | 573 | push = u_memcpya(req->push, req->nr_push, sizeof(*push)); |
614 | if (IS_ERR(push)) | 574 | if (IS_ERR(push)) { |
575 | nouveau_channel_put(&chan); | ||
615 | return PTR_ERR(push); | 576 | return PTR_ERR(push); |
577 | } | ||
616 | 578 | ||
617 | bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); | 579 | bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); |
618 | if (IS_ERR(bo)) { | 580 | if (IS_ERR(bo)) { |
619 | kfree(push); | 581 | kfree(push); |
582 | nouveau_channel_put(&chan); | ||
620 | return PTR_ERR(bo); | 583 | return PTR_ERR(bo); |
621 | } | 584 | } |
622 | 585 | ||
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
639 | ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, | 602 | ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, |
640 | req->nr_buffers, &op, &do_reloc); | 603 | req->nr_buffers, &op, &do_reloc); |
641 | if (ret) { | 604 | if (ret) { |
642 | NV_ERROR(dev, "validate: %d\n", ret); | 605 | if (ret != -ERESTARTSYS) |
606 | NV_ERROR(dev, "validate: %d\n", ret); | ||
643 | goto out; | 607 | goto out; |
644 | } | 608 | } |
645 | 609 | ||
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
732 | 696 | ||
733 | out: | 697 | out: |
734 | validate_fini(&op, fence); | 698 | validate_fini(&op, fence); |
735 | nouveau_fence_unref((void**)&fence); | 699 | nouveau_fence_unref(&fence); |
736 | kfree(bo); | 700 | kfree(bo); |
737 | kfree(push); | 701 | kfree(push); |
738 | 702 | ||
@@ -750,6 +714,7 @@ out_next: | |||
750 | req->suffix1 = 0x00000000; | 714 | req->suffix1 = 0x00000000; |
751 | } | 715 | } |
752 | 716 | ||
717 | nouveau_channel_put(&chan); | ||
753 | return ret; | 718 | return ret; |
754 | } | 719 | } |
755 | 720 | ||
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |||
781 | return -ENOENT; | 746 | return -ENOENT; |
782 | nvbo = nouveau_gem_object(gem); | 747 | nvbo = nouveau_gem_object(gem); |
783 | 748 | ||
784 | if (nvbo->cpu_filp) { | 749 | spin_lock(&nvbo->bo.bdev->fence_lock); |
785 | if (nvbo->cpu_filp == file_priv) | 750 | ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); |
786 | goto out; | 751 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
787 | |||
788 | ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait); | ||
789 | if (ret) | ||
790 | goto out; | ||
791 | } | ||
792 | |||
793 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { | ||
794 | spin_lock(&nvbo->bo.lock); | ||
795 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); | ||
796 | spin_unlock(&nvbo->bo.lock); | ||
797 | } else { | ||
798 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); | ||
799 | if (ret == 0) | ||
800 | nvbo->cpu_filp = file_priv; | ||
801 | } | ||
802 | |||
803 | out: | ||
804 | drm_gem_object_unreference_unlocked(gem); | 752 | drm_gem_object_unreference_unlocked(gem); |
805 | return ret; | 753 | return ret; |
806 | } | 754 | } |
@@ -809,26 +757,7 @@ int | |||
809 | nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, | 757 | nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, |
810 | struct drm_file *file_priv) | 758 | struct drm_file *file_priv) |
811 | { | 759 | { |
812 | struct drm_nouveau_gem_cpu_prep *req = data; | 760 | return 0; |
813 | struct drm_gem_object *gem; | ||
814 | struct nouveau_bo *nvbo; | ||
815 | int ret = -EINVAL; | ||
816 | |||
817 | gem = drm_gem_object_lookup(dev, file_priv, req->handle); | ||
818 | if (!gem) | ||
819 | return -ENOENT; | ||
820 | nvbo = nouveau_gem_object(gem); | ||
821 | |||
822 | if (nvbo->cpu_filp != file_priv) | ||
823 | goto out; | ||
824 | nvbo->cpu_filp = NULL; | ||
825 | |||
826 | ttm_bo_synccpu_write_release(&nvbo->bo); | ||
827 | ret = 0; | ||
828 | |||
829 | out: | ||
830 | drm_gem_object_unreference_unlocked(gem); | ||
831 | return ret; | ||
832 | } | 761 | } |
833 | 762 | ||
834 | int | 763 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a05c411..053edf9d2f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
953 | NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); | 953 | NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); |
954 | 954 | ||
955 | reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); | 955 | reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); |
956 | if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC) | 956 | if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC) |
957 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); | 957 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); |
958 | else | 958 | else |
959 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); | 959 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); |
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
999 | if (dev_priv->card_type == NV_10) { | 999 | if (dev_priv->card_type == NV_10) { |
1000 | /* Not waiting for vertical retrace before modifying | 1000 | /* Not waiting for vertical retrace before modifying |
1001 | CRE_53/CRE_54 causes lockups. */ | 1001 | CRE_53/CRE_54 causes lockups. */ |
1002 | nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); | 1002 | nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); |
1003 | nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); | 1003 | nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | wr_cio_state(dev, head, regp, NV_CIO_CRE_53); | 1006 | wr_cio_state(dev, head, regp, NV_CIO_CRE_53); |
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
1017 | 1017 | ||
1018 | NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); | 1018 | NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); |
1019 | 1019 | ||
1020 | /* Setting 1 on this value gives you interrupts for every vblank period. */ | 1020 | /* Enable vblank interrupts. */ |
1021 | NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0); | 1021 | NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, |
1022 | (dev->vblank_enabled[head] ? 1 : 0)); | ||
1022 | NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); | 1023 | NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); |
1023 | } | 1024 | } |
1024 | 1025 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6c9d67..2ba7265bc967 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@ | |||
36 | #include "nouveau_drv.h" | 36 | #include "nouveau_drv.h" |
37 | #include "nouveau_reg.h" | 37 | #include "nouveau_reg.h" |
38 | #include "nouveau_ramht.h" | 38 | #include "nouveau_ramht.h" |
39 | #include <linux/ratelimit.h> | 39 | #include "nouveau_util.h" |
40 | |||
41 | /* needed for hotplug irq */ | ||
42 | #include "nouveau_connector.h" | ||
43 | #include "nv50_display.h" | ||
44 | |||
45 | static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); | ||
46 | |||
47 | static int nouveau_ratelimit(void) | ||
48 | { | ||
49 | return __ratelimit(&nouveau_ratelimit_state); | ||
50 | } | ||
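
The ratelimit helper leaves nouveau_irq.c (the new nouveau_util.h include supplies it from now on); the pattern itself is a static DEFINE_RATELIMIT_STATE allowing about 20 messages per 3-second window, consulted via __ratelimit() before each log line. A self-contained user-space equivalent:

#include <stdbool.h>
#include <time.h>

/* Allow at most `burst` events per `interval` seconds, like the removed
 * DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20). */
static bool ratelimit_ok(void)
{
        static time_t window_start;
        static int count;
        const int interval = 3, burst = 20;
        time_t now = time(NULL);

        if (now - window_start >= interval) {
                window_start = now;      /* open a fresh window */
                count = 0;
        }
        return ++count <= burst;
}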
51 | 40 | ||
52 | void | 41 | void |
53 | nouveau_irq_preinstall(struct drm_device *dev) | 42 | nouveau_irq_preinstall(struct drm_device *dev) |
@@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev) | |||
57 | /* Master disable */ | 46 | /* Master disable */ |
58 | nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); | 47 | nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); |
59 | 48 | ||
60 | if (dev_priv->card_type >= NV_50) { | 49 | INIT_LIST_HEAD(&dev_priv->vbl_waiting); |
61 | INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); | ||
62 | INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); | ||
63 | spin_lock_init(&dev_priv->hpd_state.lock); | ||
64 | INIT_LIST_HEAD(&dev_priv->vbl_waiting); | ||
65 | } | ||
66 | } | 50 | } |
67 | 51 | ||
68 | int | 52 | int |
69 | nouveau_irq_postinstall(struct drm_device *dev) | 53 | nouveau_irq_postinstall(struct drm_device *dev) |
70 | { | 54 | { |
55 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
56 | |||
71 | /* Master enable */ | 57 | /* Master enable */ |
72 | nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); | 58 | nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); |
59 | if (dev_priv->msi_enabled) | ||
60 | nv_wr08(dev, 0x00088068, 0xff); | ||
61 | |||
73 | return 0; | 62 | return 0; |
74 | } | 63 | } |
75 | 64 | ||
@@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev) | |||
80 | nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); | 69 | nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); |
81 | } | 70 | } |
82 | 71 | ||
83 | static int | 72 | irqreturn_t |
84 | nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) | 73 | nouveau_irq_handler(DRM_IRQ_ARGS) |
85 | { | ||
86 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
87 | struct nouveau_pgraph_object_method *grm; | ||
88 | struct nouveau_pgraph_object_class *grc; | ||
89 | |||
90 | grc = dev_priv->engine.graph.grclass; | ||
91 | while (grc->id) { | ||
92 | if (grc->id == class) | ||
93 | break; | ||
94 | grc++; | ||
95 | } | ||
96 | |||
97 | if (grc->id != class || !grc->methods) | ||
98 | return -ENOENT; | ||
99 | |||
100 | grm = grc->methods; | ||
101 | while (grm->id) { | ||
102 | if (grm->id == mthd) | ||
103 | return grm->exec(chan, class, mthd, data); | ||
104 | grm++; | ||
105 | } | ||
106 | |||
107 | return -ENOENT; | ||
108 | } | ||
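
The removed nouveau_call_method() dispatched software methods by walking two zero-terminated tables: first matching the object class, then the method id within that class's method list. A standalone sketch of that two-level table dispatch:

#include <errno.h>

struct method {
        int id;
        int (*exec)(int data);
};

struct objclass {
        int id;
        const struct method *methods;   /* zero-terminated */
};

/* Two-level, zero-terminated table walk, as in the removed helper. */
static int call_method(const struct objclass *classes,
                       int cls, int mthd, int data)
{
        const struct objclass *c;
        const struct method *m;

        for (c = classes; c->id; c++)
                if (c->id == cls)
                        break;
        if (c->id != cls || !c->methods)
                return -ENOENT;                /* unknown class */

        for (m = c->methods; m->id; m++)
                if (m->id == mthd)
                        return m->exec(data);  /* matched: dispatch */
        return -ENOENT;                        /* unknown method */
}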
109 | |||
110 | static bool | ||
111 | nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) | ||
112 | { | ||
113 | struct drm_device *dev = chan->dev; | ||
114 | const int subc = (addr >> 13) & 0x7; | ||
115 | const int mthd = addr & 0x1ffc; | ||
116 | |||
117 | if (mthd == 0x0000) { | ||
118 | struct nouveau_gpuobj *gpuobj; | ||
119 | |||
120 | gpuobj = nouveau_ramht_find(chan, data); | ||
121 | if (!gpuobj) | ||
122 | return false; | ||
123 | |||
124 | if (gpuobj->engine != NVOBJ_ENGINE_SW) | ||
125 | return false; | ||
126 | |||
127 | chan->sw_subchannel[subc] = gpuobj->class; | ||
128 | nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, | ||
129 | NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); | ||
130 | return true; | ||
131 | } | ||
132 | |||
133 | /* hw object */ | ||
134 | if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4))) | ||
135 | return false; | ||
136 | |||
137 | if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data)) | ||
138 | return false; | ||
139 | |||
140 | return true; | ||
141 | } | ||
142 | |||
143 | static void | ||
144 | nouveau_fifo_irq_handler(struct drm_device *dev) | ||
145 | { | ||
146 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
147 | struct nouveau_engine *engine = &dev_priv->engine; | ||
148 | uint32_t status, reassign; | ||
149 | int cnt = 0; | ||
150 | |||
151 | reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; | ||
152 | while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { | ||
153 | struct nouveau_channel *chan = NULL; | ||
154 | uint32_t chid, get; | ||
155 | |||
156 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | ||
157 | |||
158 | chid = engine->fifo.channel_id(dev); | ||
159 | if (chid >= 0 && chid < engine->fifo.channels) | ||
160 | chan = dev_priv->fifos[chid]; | ||
161 | get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); | ||
162 | |||
163 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { | ||
164 | uint32_t mthd, data; | ||
165 | int ptr; | ||
166 | |||
167 | /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before | ||
168 | * wrapping on my G80 chips, but CACHE1 isn't big | ||
169 | * enough for this much data.. Tests show that it | ||
170 | * wraps around to the start at GET=0x800.. No clue | ||
171 | * as to why.. | ||
172 | */ | ||
173 | ptr = (get & 0x7ff) >> 2; | ||
174 | |||
175 | if (dev_priv->card_type < NV_40) { | ||
176 | mthd = nv_rd32(dev, | ||
177 | NV04_PFIFO_CACHE1_METHOD(ptr)); | ||
178 | data = nv_rd32(dev, | ||
179 | NV04_PFIFO_CACHE1_DATA(ptr)); | ||
180 | } else { | ||
181 | mthd = nv_rd32(dev, | ||
182 | NV40_PFIFO_CACHE1_METHOD(ptr)); | ||
183 | data = nv_rd32(dev, | ||
184 | NV40_PFIFO_CACHE1_DATA(ptr)); | ||
185 | } | ||
186 | |||
187 | if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) { | ||
188 | NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " | ||
189 | "Mthd 0x%04x Data 0x%08x\n", | ||
190 | chid, (mthd >> 13) & 7, mthd & 0x1ffc, | ||
191 | data); | ||
192 | } | ||
193 | |||
194 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); | ||
195 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
196 | NV_PFIFO_INTR_CACHE_ERROR); | ||
197 | |||
198 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, | ||
199 | nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); | ||
200 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
201 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, | ||
202 | nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); | ||
203 | nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); | ||
204 | |||
205 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, | ||
206 | nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); | ||
207 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
208 | |||
209 | status &= ~NV_PFIFO_INTR_CACHE_ERROR; | ||
210 | } | ||
211 | |||
212 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { | ||
213 | u32 dma_get = nv_rd32(dev, 0x003244); | ||
214 | u32 dma_put = nv_rd32(dev, 0x003240); | ||
215 | u32 push = nv_rd32(dev, 0x003220); | ||
216 | u32 state = nv_rd32(dev, 0x003228); | ||
217 | |||
218 | if (dev_priv->card_type == NV_50) { | ||
219 | u32 ho_get = nv_rd32(dev, 0x003328); | ||
220 | u32 ho_put = nv_rd32(dev, 0x003320); | ||
221 | u32 ib_get = nv_rd32(dev, 0x003334); | ||
222 | u32 ib_put = nv_rd32(dev, 0x003330); | ||
223 | |||
224 | if (nouveau_ratelimit()) | ||
225 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " | ||
226 | "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " | ||
227 | "State 0x%08x Push 0x%08x\n", | ||
228 | chid, ho_get, dma_get, ho_put, | ||
229 | dma_put, ib_get, ib_put, state, | ||
230 | push); | ||
231 | |||
232 | /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ | ||
233 | nv_wr32(dev, 0x003364, 0x00000000); | ||
234 | if (dma_get != dma_put || ho_get != ho_put) { | ||
235 | nv_wr32(dev, 0x003244, dma_put); | ||
236 | nv_wr32(dev, 0x003328, ho_put); | ||
237 | } else | ||
238 | if (ib_get != ib_put) { | ||
239 | nv_wr32(dev, 0x003334, ib_put); | ||
240 | } | ||
241 | } else { | ||
242 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " | ||
243 | "Put 0x%08x State 0x%08x Push 0x%08x\n", | ||
244 | chid, dma_get, dma_put, state, push); | ||
245 | |||
246 | if (dma_get != dma_put) | ||
247 | nv_wr32(dev, 0x003244, dma_put); | ||
248 | } | ||
249 | |||
250 | nv_wr32(dev, 0x003228, 0x00000000); | ||
251 | nv_wr32(dev, 0x003220, 0x00000001); | ||
252 | nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); | ||
253 | status &= ~NV_PFIFO_INTR_DMA_PUSHER; | ||
254 | } | ||
255 | |||
256 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | ||
257 | uint32_t sem; | ||
258 | |||
259 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | ||
260 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
261 | NV_PFIFO_INTR_SEMAPHORE); | ||
262 | |||
263 | sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); | ||
264 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | ||
265 | |||
266 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
267 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
268 | } | ||
269 | |||
270 | if (dev_priv->card_type == NV_50) { | ||
271 | if (status & 0x00000010) { | ||
272 | nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); | ||
273 | status &= ~0x00000010; | ||
274 | nv_wr32(dev, 0x002100, 0x00000010); | ||
275 | } | ||
276 | } | ||
277 | |||
278 | if (status) { | ||
279 | if (nouveau_ratelimit()) | ||
280 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", | ||
281 | status, chid); | ||
282 | nv_wr32(dev, NV03_PFIFO_INTR_0, status); | ||
283 | status = 0; | ||
284 | } | ||
285 | |||
286 | nv_wr32(dev, NV03_PFIFO_CACHES, reassign); | ||
287 | } | ||
288 | |||
289 | if (status) { | ||
290 | NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); | ||
291 | nv_wr32(dev, 0x2140, 0); | ||
292 | nv_wr32(dev, 0x140, 0); | ||
293 | } | ||
294 | |||
295 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); | ||
296 | } | ||
297 | |||
298 | struct nouveau_bitfield_names { | ||
299 | uint32_t mask; | ||
300 | const char *name; | ||
301 | }; | ||
302 | |||
303 | static struct nouveau_bitfield_names nstatus_names[] = | ||
304 | { | ||
305 | { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, | ||
306 | { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, | ||
307 | { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, | ||
308 | { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } | ||
309 | }; | ||
310 | |||
311 | static struct nouveau_bitfield_names nstatus_names_nv10[] = | ||
312 | { | ||
313 | { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, | ||
314 | { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, | ||
315 | { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, | ||
316 | { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } | ||
317 | }; | ||
318 | |||
319 | static struct nouveau_bitfield_names nsource_names[] = | ||
320 | { | ||
321 | { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, | ||
322 | { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, | ||
323 | { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, | ||
324 | { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, | ||
325 | { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, | ||
326 | { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, | ||
327 | { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, | ||
328 | { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, | ||
329 | { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, | ||
330 | { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, | ||
331 | { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, | ||
332 | { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, | ||
333 | { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, | ||
334 | { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, | ||
335 | { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, | ||
336 | { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, | ||
337 | { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, | ||
338 | { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, | ||
339 | { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, | ||
340 | }; | ||
341 | |||
342 | static void | ||
343 | nouveau_print_bitfield_names_(uint32_t value, | ||
344 | const struct nouveau_bitfield_names *namelist, | ||
345 | const int namelist_len) | ||
346 | { | ||
347 | /* | ||
348 | * Caller must have already printed the KERN_* log level for us. | ||
349 | * Also the caller is responsible for adding the newline. | ||
350 | */ | ||
351 | int i; | ||
352 | for (i = 0; i < namelist_len; ++i) { | ||
353 | uint32_t mask = namelist[i].mask; | ||
354 | if (value & mask) { | ||
355 | printk(" %s", namelist[i].name); | ||
356 | value &= ~mask; | ||
357 | } | ||
358 | } | ||
359 | if (value) | ||
360 | printk(" (unknown bits 0x%08x)", value); | ||
361 | } | ||
362 | #define nouveau_print_bitfield_names(val, namelist) \ | ||
363 | nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) | ||
364 | |||
365 | struct nouveau_enum_names { | ||
366 | uint32_t value; | ||
367 | const char *name; | ||
368 | }; | ||
369 | |||
370 | static void | ||
371 | nouveau_print_enum_names_(uint32_t value, | ||
372 | const struct nouveau_enum_names *namelist, | ||
373 | const int namelist_len) | ||
374 | { | ||
375 | /* | ||
376 | * Caller must have already printed the KERN_* log level for us. | ||
377 | * Also the caller is responsible for adding the newline. | ||
378 | */ | ||
379 | int i; | ||
380 | for (i = 0; i < namelist_len; ++i) { | ||
381 | if (value == namelist[i].value) { | ||
382 | printk("%s", namelist[i].name); | ||
383 | return; | ||
384 | } | ||
385 | } | ||
386 | printk("unknown value 0x%08x", value); | ||
387 | } | ||
388 | #define nouveau_print_enum_names(val, namelist) \ | ||
389 | nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) | ||
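
Both helpers above share one decode pattern: walk a name table, print every match, and flag whatever is left over as undocumented. A minimal user-space sketch of that pattern follows; the table contents are invented placeholders, not real PGRAPH bit definitions.

#include <stdio.h>
#include <stdint.h>

struct name_entry { uint32_t key; const char *name; };

/* Hypothetical table, for illustration only. */
static const struct name_entry tbl[] = {
        { 0x00000001, "STATE_IN_USE" },
        { 0x00000002, "INVALID_STATE" },
};

static void print_bitfield(uint32_t value, const struct name_entry *t, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (value & t[i].key) {
                        printf(" %s", t[i].name);
                        value &= ~t[i].key;     /* consume the matched bit */
                }
        }
        if (value)      /* anything left is undocumented */
                printf(" (unknown bits 0x%08x)", (unsigned)value);
}

static void print_enum(uint32_t value, const struct name_entry *t, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (value == t[i].key) {
                        printf("%s", t[i].name);
                        return;
                }
        }
        printf("unknown value 0x%08x", (unsigned)value);
}

int main(void)
{
        print_bitfield(0x7, tbl, 2);    /* both names, plus unknown 0x4 */
        putchar('\n');
        print_enum(0x2, tbl, 2);        /* prints INVALID_STATE */
        putchar('\n');
        return 0;
}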
390 | |||
391 | static int | ||
392 | nouveau_graph_chid_from_grctx(struct drm_device *dev) | ||
393 | { | 74 | { |
75 | struct drm_device *dev = (struct drm_device *)arg; | ||
394 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 76 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
395 | uint32_t inst; | 77 | unsigned long flags; |
78 | u32 stat; | ||
396 | int i; | 79 | int i; |
397 | 80 | ||
398 | if (dev_priv->card_type < NV_40) | 81 | stat = nv_rd32(dev, NV03_PMC_INTR_0); |
399 | return dev_priv->engine.fifo.channels; | 82 | if (!stat) |
400 | else | 83 | return IRQ_NONE; |
401 | if (dev_priv->card_type < NV_50) { | ||
402 | inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4; | ||
403 | |||
404 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
405 | struct nouveau_channel *chan = dev_priv->fifos[i]; | ||
406 | |||
407 | if (!chan || !chan->ramin_grctx) | ||
408 | continue; | ||
409 | |||
410 | if (inst == chan->ramin_grctx->pinst) | ||
411 | break; | ||
412 | } | ||
413 | } else { | ||
414 | inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12; | ||
415 | |||
416 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
417 | struct nouveau_channel *chan = dev_priv->fifos[i]; | ||
418 | |||
419 | if (!chan || !chan->ramin) | ||
420 | continue; | ||
421 | |||
422 | if (inst == chan->ramin->vinst) | ||
423 | break; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | |||
428 | return i; | ||
429 | } | ||
430 | |||
431 | static int | ||
432 | nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) | ||
433 | { | ||
434 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
435 | struct nouveau_engine *engine = &dev_priv->engine; | ||
436 | int channel; | ||
437 | |||
438 | if (dev_priv->card_type < NV_10) | ||
439 | channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; | ||
440 | else | ||
441 | if (dev_priv->card_type < NV_40) | ||
442 | channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; | ||
443 | else | ||
444 | channel = nouveau_graph_chid_from_grctx(dev); | ||
445 | |||
446 | if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { | ||
447 | NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel); | ||
448 | return -EINVAL; | ||
449 | } | ||
450 | |||
451 | *channel_ret = channel; | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | struct nouveau_pgraph_trap { | ||
456 | int channel; | ||
457 | int class; | ||
458 | int subc, mthd, size; | ||
459 | uint32_t data, data2; | ||
460 | uint32_t nsource, nstatus; | ||
461 | }; | ||
462 | |||
463 | static void | ||
464 | nouveau_graph_trap_info(struct drm_device *dev, | ||
465 | struct nouveau_pgraph_trap *trap) | ||
466 | { | ||
467 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
468 | uint32_t address; | ||
469 | |||
470 | trap->nsource = trap->nstatus = 0; | ||
471 | if (dev_priv->card_type < NV_50) { | ||
472 | trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | ||
473 | trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); | ||
474 | } | ||
475 | |||
476 | if (nouveau_graph_trapped_channel(dev, &trap->channel)) | ||
477 | trap->channel = -1; | ||
478 | address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); | ||
479 | |||
480 | trap->mthd = address & 0x1FFC; | ||
481 | trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); | ||
482 | if (dev_priv->card_type < NV_10) { | ||
483 | trap->subc = (address >> 13) & 0x7; | ||
484 | } else { | ||
485 | trap->subc = (address >> 16) & 0x7; | ||
486 | trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH); | ||
487 | } | ||
488 | |||
489 | if (dev_priv->card_type < NV_10) | ||
490 | trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF; | ||
491 | else if (dev_priv->card_type < NV_40) | ||
492 | trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF; | ||
493 | else if (dev_priv->card_type < NV_50) | ||
494 | trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF; | ||
495 | else | ||
496 | trap->class = nv_rd32(dev, 0x400814); | ||
497 | } | ||
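
nouveau_graph_trap_info() recovers the channel, subchannel and method from the single NV04_PGRAPH_TRAPPED_ADDR word, with field positions that shifted between GPU generations. A stand-alone sketch of just that field extraction, using the masks from the code above (the sample address is made up):

#include <stdio.h>
#include <stdint.h>

enum card { PRE_NV10, NV10_TO_NV40 };

static void decode_trapped_addr(uint32_t addr, enum card gen)
{
        uint32_t mthd = addr & 0x1ffc;  /* method field is common */
        uint32_t subc, chid;

        if (gen == PRE_NV10) {
                subc = (addr >> 13) & 0x7;
                chid = (addr >> 24) & 0xf;      /* 16 channels */
        } else {
                subc = (addr >> 16) & 0x7;
                chid = (addr >> 20) & 0x1f;     /* 32 channels */
        }
        printf("ch %u subc %u mthd 0x%04x\n",
               (unsigned)chid, (unsigned)subc, (unsigned)mthd);
}

int main(void)
{
        decode_trapped_addr(0x01230184, NV10_TO_NV40);  /* made-up value */
        return 0;
}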
498 | |||
499 | static void | ||
500 | nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, | ||
501 | struct nouveau_pgraph_trap *trap) | ||
502 | { | ||
503 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
504 | uint32_t nsource = trap->nsource, nstatus = trap->nstatus; | ||
505 | |||
506 | if (dev_priv->card_type < NV_50) { | ||
507 | NV_INFO(dev, "%s - nSource:", id); | ||
508 | nouveau_print_bitfield_names(nsource, nsource_names); | ||
509 | printk(", nStatus:"); | ||
510 | if (dev_priv->card_type < NV_10) | ||
511 | nouveau_print_bitfield_names(nstatus, nstatus_names); | ||
512 | else | ||
513 | nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); | ||
514 | printk("\n"); | ||
515 | } | ||
516 | |||
517 | NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " | ||
518 | "Data 0x%08x:0x%08x\n", | ||
519 | id, trap->channel, trap->subc, | ||
520 | trap->class, trap->mthd, | ||
521 | trap->data2, trap->data); | ||
522 | } | ||
523 | |||
524 | static int | ||
525 | nouveau_pgraph_intr_swmthd(struct drm_device *dev, | ||
526 | struct nouveau_pgraph_trap *trap) | ||
527 | { | ||
528 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
529 | |||
530 | if (trap->channel < 0 || | ||
531 | trap->channel >= dev_priv->engine.fifo.channels || | ||
532 | !dev_priv->fifos[trap->channel]) | ||
533 | return -ENODEV; | ||
534 | |||
535 | return nouveau_call_method(dev_priv->fifos[trap->channel], | ||
536 | trap->class, trap->mthd, trap->data); | ||
537 | } | ||
538 | |||
539 | static inline void | ||
540 | nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) | ||
541 | { | ||
542 | struct nouveau_pgraph_trap trap; | ||
543 | int unhandled = 0; | ||
544 | 84 | ||
545 | nouveau_graph_trap_info(dev, &trap); | 85 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
86 | for (i = 0; i < 32 && stat; i++) { | ||
87 | if (!(stat & (1 << i)) || !dev_priv->irq_handler[i]) | ||
88 | continue; | ||
546 | 89 | ||
547 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | 90 | dev_priv->irq_handler[i](dev); |
548 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | 91 | stat &= ~(1 << i); |
549 | unhandled = 1; | ||
550 | } else { | ||
551 | unhandled = 1; | ||
552 | } | 92 | } |
553 | 93 | ||
554 | if (unhandled) | 94 | if (dev_priv->msi_enabled) |
555 | nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); | 95 | nv_wr08(dev, 0x00088068, 0xff); |
556 | } | 96 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
557 | |||
558 | |||
559 | static inline void | ||
560 | nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) | ||
561 | { | ||
562 | struct nouveau_pgraph_trap trap; | ||
563 | int unhandled = 0; | ||
564 | |||
565 | nouveau_graph_trap_info(dev, &trap); | ||
566 | trap.nsource = nsource; | ||
567 | |||
568 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | ||
569 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | ||
570 | unhandled = 1; | ||
571 | } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { | ||
572 | uint32_t v = nv_rd32(dev, 0x402000); | ||
573 | nv_wr32(dev, 0x402000, v); | ||
574 | |||
575 | /* dump the error anyway for now: it's useful for | ||
576 | Gallium development */ | ||
577 | unhandled = 1; | ||
578 | } else { | ||
579 | unhandled = 1; | ||
580 | } | ||
581 | 97 | ||
582 | if (unhandled && nouveau_ratelimit()) | 98 | if (stat && nouveau_ratelimit()) |
583 | nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); | 99 | NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat); |
100 | return IRQ_HANDLED; | ||
584 | } | 101 | } |
585 | 102 | ||
586 | static inline void | 103 | int |
587 | nouveau_pgraph_intr_context_switch(struct drm_device *dev) | 104 | nouveau_irq_init(struct drm_device *dev) |
588 | { | 105 | { |
589 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 106 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
590 | struct nouveau_engine *engine = &dev_priv->engine; | 107 | int ret; |
591 | uint32_t chid; | ||
592 | |||
593 | chid = engine->fifo.channel_id(dev); | ||
594 | NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid); | ||
595 | |||
596 | switch (dev_priv->card_type) { | ||
597 | case NV_04: | ||
598 | nv04_graph_context_switch(dev); | ||
599 | break; | ||
600 | case NV_10: | ||
601 | nv10_graph_context_switch(dev); | ||
602 | break; | ||
603 | default: | ||
604 | NV_ERROR(dev, "Context switch not implemented\n"); | ||
605 | break; | ||
606 | } | ||
607 | } | ||
608 | |||
609 | static void | ||
610 | nouveau_pgraph_irq_handler(struct drm_device *dev) | ||
611 | { | ||
612 | uint32_t status; | ||
613 | |||
614 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { | ||
615 | uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | ||
616 | |||
617 | if (status & NV_PGRAPH_INTR_NOTIFY) { | ||
618 | nouveau_pgraph_intr_notify(dev, nsource); | ||
619 | |||
620 | status &= ~NV_PGRAPH_INTR_NOTIFY; | ||
621 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); | ||
622 | } | ||
623 | |||
624 | if (status & NV_PGRAPH_INTR_ERROR) { | ||
625 | nouveau_pgraph_intr_error(dev, nsource); | ||
626 | 108 | ||
627 | status &= ~NV_PGRAPH_INTR_ERROR; | 109 | if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) { |
628 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); | 110 | ret = pci_enable_msi(dev->pdev); |
111 | if (ret == 0) { | ||
112 | NV_INFO(dev, "enabled MSI\n"); | ||
113 | dev_priv->msi_enabled = true; | ||
629 | } | 114 | } |
630 | |||
631 | if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { | ||
632 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | ||
633 | nv_wr32(dev, NV03_PGRAPH_INTR, | ||
634 | NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
635 | |||
636 | nouveau_pgraph_intr_context_switch(dev); | ||
637 | } | ||
638 | |||
639 | if (status) { | ||
640 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); | ||
641 | nv_wr32(dev, NV03_PGRAPH_INTR, status); | ||
642 | } | ||
643 | |||
644 | if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0) | ||
645 | nv_wr32(dev, NV04_PGRAPH_FIFO, 1); | ||
646 | } | 115 | } |
647 | 116 | ||
648 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 117 | return drm_irq_install(dev); |
649 | } | ||
650 | |||
651 | static struct nouveau_enum_names nv50_mp_exec_error_names[] = | ||
652 | { | ||
653 | { 3, "STACK_UNDERFLOW" }, | ||
654 | { 4, "QUADON_ACTIVE" }, | ||
655 | { 8, "TIMEOUT" }, | ||
656 | { 0x10, "INVALID_OPCODE" }, | ||
657 | { 0x40, "BREAKPOINT" }, | ||
658 | }; | ||
659 | |||
660 | static void | ||
661 | nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) | ||
662 | { | ||
663 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
664 | uint32_t units = nv_rd32(dev, 0x1540); | ||
665 | uint32_t addr, mp10, status, pc, oplow, ophigh; | ||
666 | int i; | ||
667 | int mps = 0; | ||
668 | for (i = 0; i < 4; i++) { | ||
669 | if (!(units & 1 << (i+24))) | ||
670 | continue; | ||
671 | if (dev_priv->chipset < 0xa0) | ||
672 | addr = 0x408200 + (tpid << 12) + (i << 7); | ||
673 | else | ||
674 | addr = 0x408100 + (tpid << 11) + (i << 7); | ||
675 | mp10 = nv_rd32(dev, addr + 0x10); | ||
676 | status = nv_rd32(dev, addr + 0x14); | ||
677 | if (!status) | ||
678 | continue; | ||
679 | if (display) { | ||
680 | nv_rd32(dev, addr + 0x20); | ||
681 | pc = nv_rd32(dev, addr + 0x24); | ||
682 | oplow = nv_rd32(dev, addr + 0x70); | ||
683 | ophigh = nv_rd32(dev, addr + 0x74); | ||

684 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " | ||
685 | "TP %d MP %d: ", tpid, i); | ||
686 | nouveau_print_enum_names(status, | ||
687 | nv50_mp_exec_error_names); | ||
688 | printk(" at %06x warp %d, opcode %08x %08x\n", | ||
689 | pc&0xffffff, pc >> 24, | ||
690 | oplow, ophigh); | ||
691 | } | ||
692 | nv_wr32(dev, addr + 0x10, mp10); | ||
693 | nv_wr32(dev, addr + 0x14, 0); | ||
694 | mps++; | ||
695 | } | ||
696 | if (!mps && display) | ||
697 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " | ||
698 | "No MPs claiming errors?\n", tpid); | ||
699 | } | 118 | } |
700 | 119 | ||
701 | static void | 120 | void |
702 | nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, | 121 | nouveau_irq_fini(struct drm_device *dev) |
703 | uint32_t ustatus_new, int display, const char *name) | ||
704 | { | 122 | { |
705 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 123 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
706 | int tps = 0; | ||
707 | uint32_t units = nv_rd32(dev, 0x1540); | ||
708 | int i, r; | ||
709 | uint32_t ustatus_addr, ustatus; | ||
710 | for (i = 0; i < 16; i++) { | ||
711 | if (!(units & (1 << i))) | ||
712 | continue; | ||
713 | if (dev_priv->chipset < 0xa0) | ||
714 | ustatus_addr = ustatus_old + (i << 12); | ||
715 | else | ||
716 | ustatus_addr = ustatus_new + (i << 11); | ||
717 | ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; | ||
718 | if (!ustatus) | ||
719 | continue; | ||
720 | tps++; | ||
721 | switch (type) { | ||
722 | case 6: /* texture error... unknown for now */ | ||
723 | nv50_fb_vm_trap(dev, display, name); | ||
724 | if (display) { | ||
725 | NV_ERROR(dev, "magic set %d:\n", i); | ||
726 | for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) | ||
727 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
728 | nv_rd32(dev, r)); | ||
729 | } | ||
730 | break; | ||
731 | case 7: /* MP error */ | ||
732 | if (ustatus & 0x00010000) { | ||
733 | nv50_pgraph_mp_trap(dev, i, display); | ||
734 | ustatus &= ~0x00010000; | ||
735 | } | ||
736 | break; | ||
737 | case 8: /* TPDMA error */ | ||
738 | { | ||
739 | uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); | ||
740 | uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); | ||
741 | uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); | ||
742 | uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); | ||
743 | uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); | ||
744 | uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); | ||
745 | uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); | ||
746 | nv50_fb_vm_trap(dev, display, name); | ||
747 | /* 2d engine destination */ | ||
748 | if (ustatus & 0x00000010) { | ||
749 | if (display) { | ||
750 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", | ||
751 | i, e14, e10); | ||
752 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
753 | i, e0c, e18, e1c, e20, e24); | ||
754 | } | ||
755 | ustatus &= ~0x00000010; | ||
756 | } | ||
757 | /* Render target */ | ||
758 | if (ustatus & 0x00000040) { | ||
759 | if (display) { | ||
760 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", | ||
761 | i, e14, e10); | ||
762 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
763 | i, e0c, e18, e1c, e20, e24); | ||
764 | } | ||
765 | ustatus &= ~0x00000040; | ||
766 | } | ||
767 | /* CUDA memory: l[], g[] or stack. */ | ||
768 | if (ustatus & 0x00000080) { | ||
769 | if (display) { | ||
770 | if (e18 & 0x80000000) { | ||
771 | /* g[] read fault? */ | ||
772 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", | ||
773 | i, e14, e10 | ((e18 >> 24) & 0x1f)); | ||
774 | e18 &= ~0x1f000000; | ||
775 | } else if (e18 & 0xc) { | ||
776 | /* g[] write fault? */ | ||
777 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", | ||
778 | i, e14, e10 | ((e18 >> 7) & 0x1f)); | ||
779 | e18 &= ~0x00000f80; | ||
780 | } else { | ||
781 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", | ||
782 | i, e14, e10); | ||
783 | } | ||
784 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
785 | i, e0c, e18, e1c, e20, e24); | ||
786 | } | ||
787 | ustatus &= ~0x00000080; | ||
788 | } | ||
789 | } | ||
790 | break; | ||
791 | } | ||
792 | if (ustatus) { | ||
793 | if (display) | ||
794 | NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); | ||
795 | } | ||
796 | nv_wr32(dev, ustatus_addr, 0xc0000000); | ||
797 | } | ||
798 | |||
799 | if (!tps && display) | ||
800 | NV_INFO(dev, "%s - No TPs claiming errors?\n", name); | ||
801 | } | ||
802 | |||
803 | static void | ||
804 | nv50_pgraph_trap_handler(struct drm_device *dev) | ||
805 | { | ||
806 | struct nouveau_pgraph_trap trap; | ||
807 | uint32_t status = nv_rd32(dev, 0x400108); | ||
808 | uint32_t ustatus; | ||
809 | int display = nouveau_ratelimit(); | ||
810 | |||
811 | |||
812 | if (!status && display) { | ||
813 | nouveau_graph_trap_info(dev, &trap); | ||
814 | nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); | ||
815 | NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); | ||
816 | } | ||
817 | |||
818 | /* DISPATCH: Relays commands to other units and handles NOTIFY, | ||
819 | * COND, QUERY. If you get a trap from it, the command is still stuck | ||
820 | * in DISPATCH and you need to do something about it. */ | ||
821 | if (status & 0x001) { | ||
822 | ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; | ||
823 | if (!ustatus && display) { | ||
824 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); | ||
825 | } | ||
826 | |||
827 | /* Known to be triggered by screwed up NOTIFY and COND... */ | ||
828 | if (ustatus & 0x00000001) { | ||
829 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); | ||
830 | nv_wr32(dev, 0x400500, 0); | ||
831 | if (nv_rd32(dev, 0x400808) & 0x80000000) { | ||
832 | if (display) { | ||
833 | if (nouveau_graph_trapped_channel(dev, &trap.channel)) | ||
834 | trap.channel = -1; | ||
835 | trap.class = nv_rd32(dev, 0x400814); | ||
836 | trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; | ||
837 | trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; | ||
838 | trap.data = nv_rd32(dev, 0x40080c); | ||
839 | trap.data2 = nv_rd32(dev, 0x400810); | ||
840 | nouveau_graph_dump_trap_info(dev, | ||
841 | "PGRAPH_TRAP_DISPATCH_FAULT", &trap); | ||
842 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); | ||
843 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); | ||
844 | } | ||
845 | nv_wr32(dev, 0x400808, 0); | ||
846 | } else if (display) { | ||
847 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); | ||
848 | } | ||
849 | nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); | ||
850 | nv_wr32(dev, 0x400848, 0); | ||
851 | ustatus &= ~0x00000001; | ||
852 | } | ||
853 | if (ustatus & 0x00000002) { | ||
854 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); | ||
855 | nv_wr32(dev, 0x400500, 0); | ||
856 | if (nv_rd32(dev, 0x40084c) & 0x80000000) { | ||
857 | if (display) { | ||
858 | if (nouveau_graph_trapped_channel(dev, &trap.channel)) | ||
859 | trap.channel = -1; | ||
860 | trap.class = nv_rd32(dev, 0x400814); | ||
861 | trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; | ||
862 | trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; | ||
863 | trap.data = nv_rd32(dev, 0x40085c); | ||
864 | trap.data2 = 0; | ||
865 | nouveau_graph_dump_trap_info(dev, | ||
866 | "PGRAPH_TRAP_DISPATCH_QUERY", &trap); | ||
867 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); | ||
868 | } | ||
869 | nv_wr32(dev, 0x40084c, 0); | ||
870 | } else if (display) { | ||
871 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); | ||
872 | } | ||
873 | ustatus &= ~0x00000002; | ||
874 | } | ||
875 | if (ustatus && display) | ||
876 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); | ||
877 | nv_wr32(dev, 0x400804, 0xc0000000); | ||
878 | nv_wr32(dev, 0x400108, 0x001); | ||
879 | status &= ~0x001; | ||
880 | } | ||
881 | |||
882 | /* TRAPs other than dispatch use the "normal" trap regs. */ | ||
883 | if (status && display) { | ||
884 | nouveau_graph_trap_info(dev, &trap); | ||
885 | nouveau_graph_dump_trap_info(dev, | ||
886 | "PGRAPH_TRAP", &trap); | ||
887 | } | ||
888 | |||
889 | /* M2MF: Memory to memory copy engine. */ | ||
890 | if (status & 0x002) { | ||
891 | ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; | ||
892 | if (!ustatus && display) { | ||
893 | NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); | ||
894 | } | ||
895 | if (ustatus & 0x00000001) { | ||
896 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); | ||
897 | ustatus &= ~0x00000001; | ||
898 | } | ||
899 | if (ustatus & 0x00000002) { | ||
900 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); | ||
901 | ustatus &= ~0x00000002; | ||
902 | } | ||
903 | if (ustatus & 0x00000004) { | ||
904 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); | ||
905 | ustatus &= ~0x00000004; | ||
906 | } | ||
907 | NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", | ||
908 | nv_rd32(dev, 0x406804), | ||
909 | nv_rd32(dev, 0x406808), | ||
910 | nv_rd32(dev, 0x40680c), | ||
911 | nv_rd32(dev, 0x406810)); | ||
912 | if (ustatus && display) | ||
913 | NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); | ||
914 | /* No sane way found yet -- just reset the bugger. */ | ||
915 | nv_wr32(dev, 0x400040, 2); | ||
916 | nv_wr32(dev, 0x400040, 0); | ||
917 | nv_wr32(dev, 0x406800, 0xc0000000); | ||
918 | nv_wr32(dev, 0x400108, 0x002); | ||
919 | status &= ~0x002; | ||
920 | } | ||
921 | |||
922 | /* VFETCH: Fetches data from vertex buffers. */ | ||
923 | if (status & 0x004) { | ||
924 | ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; | ||
925 | if (!ustatus && display) { | ||
926 | NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); | ||
927 | } | ||
928 | if (ustatus & 0x00000001) { | ||
929 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); | ||
930 | NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", | ||
931 | nv_rd32(dev, 0x400c00), | ||
932 | nv_rd32(dev, 0x400c08), | ||
933 | nv_rd32(dev, 0x400c0c), | ||
934 | nv_rd32(dev, 0x400c10)); | ||
935 | ustatus &= ~0x00000001; | ||
936 | } | ||
937 | if (ustatus && display) | ||
938 | NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); | ||
939 | nv_wr32(dev, 0x400c04, 0xc0000000); | ||
940 | nv_wr32(dev, 0x400108, 0x004); | ||
941 | status &= ~0x004; | ||
942 | } | ||
943 | |||
944 | /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ | ||
945 | if (status & 0x008) { | ||
946 | ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; | ||
947 | if (!ustatus && display) { | ||
948 | NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); | ||
949 | } | ||
950 | if (ustatus & 0x00000001) { | ||
951 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); | ||
952 | NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", | ||
953 | nv_rd32(dev, 0x401804), | ||
954 | nv_rd32(dev, 0x401808), | ||
955 | nv_rd32(dev, 0x40180c), | ||
956 | nv_rd32(dev, 0x401810)); | ||
957 | ustatus &= ~0x00000001; | ||
958 | } | ||
959 | if (ustatus && display) | ||
960 | NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); | ||
961 | /* No sane way found yet -- just reset the bugger. */ | ||
962 | nv_wr32(dev, 0x400040, 0x80); | ||
963 | nv_wr32(dev, 0x400040, 0); | ||
964 | nv_wr32(dev, 0x401800, 0xc0000000); | ||
965 | nv_wr32(dev, 0x400108, 0x008); | ||
966 | status &= ~0x008; | ||
967 | } | ||
968 | |||
969 | /* CCACHE: Handles code and c[] caches and fills them. */ | ||
970 | if (status & 0x010) { | ||
971 | ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; | ||
972 | if (!ustatus && display) { | ||
973 | NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); | ||
974 | } | ||
975 | if (ustatus & 0x00000001) { | ||
976 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); | ||
977 | NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", | ||
978 | nv_rd32(dev, 0x405800), | ||
979 | nv_rd32(dev, 0x405804), | ||
980 | nv_rd32(dev, 0x405808), | ||
981 | nv_rd32(dev, 0x40580c), | ||
982 | nv_rd32(dev, 0x405810), | ||
983 | nv_rd32(dev, 0x405814), | ||
984 | nv_rd32(dev, 0x40581c)); | ||
985 | ustatus &= ~0x00000001; | ||
986 | } | ||
987 | if (ustatus && display) | ||
988 | NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); | ||
989 | nv_wr32(dev, 0x405018, 0xc0000000); | ||
990 | nv_wr32(dev, 0x400108, 0x010); | ||
991 | status &= ~0x010; | ||
992 | } | ||
993 | |||
994 | /* Unknown, not seen yet... 0x402000 is the only trap status reg | ||
995 | * remaining, so try to handle it anyway. Perhaps related to that | ||
996 | * unknown DMA slot on tesla? */ | ||
997 | if (status & 0x20) { | ||
998 | nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); | ||
999 | ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; | ||
1000 | if (display) | ||
1001 | NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); | ||
1002 | nv_wr32(dev, 0x402000, 0xc0000000); | ||
1003 | /* no status modification on purpose */ | ||
1004 | } | ||
1005 | |||
1006 | /* TEXTURE: CUDA texturing units */ | ||
1007 | if (status & 0x040) { | ||
1008 | nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, | ||
1009 | "PGRAPH_TRAP_TEXTURE"); | ||
1010 | nv_wr32(dev, 0x400108, 0x040); | ||
1011 | status &= ~0x040; | ||
1012 | } | ||
1013 | |||
1014 | /* MP: CUDA execution engines. */ | ||
1015 | if (status & 0x080) { | ||
1016 | nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, | ||
1017 | "PGRAPH_TRAP_MP"); | ||
1018 | nv_wr32(dev, 0x400108, 0x080); | ||
1019 | status &= ~0x080; | ||
1020 | } | ||
1021 | |||
1022 | /* TPDMA: Handles TP-initiated uncached memory accesses: | ||
1023 | * l[], g[], stack, 2d surfaces, render targets. */ | ||
1024 | if (status & 0x100) { | ||
1025 | nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, | ||
1026 | "PGRAPH_TRAP_TPDMA"); | ||
1027 | nv_wr32(dev, 0x400108, 0x100); | ||
1028 | status &= ~0x100; | ||
1029 | } | ||
1030 | |||
1031 | if (status) { | ||
1032 | if (display) | ||
1033 | NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", | ||
1034 | status); | ||
1035 | nv_wr32(dev, 0x400108, status); | ||
1036 | } | ||
1037 | } | ||
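
Every interrupt path in this file follows the same acknowledge discipline: handle a condition, write its bit back to the status register to ack it in hardware, and clear it from the local copy so the final catch-all reports only what nobody claimed. A toy model of that loop, with register access stubbed out and placeholder bit meanings:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_intr = 0x00200011; /* pretend pending bits */

static uint32_t rd_intr(void) { return fake_intr; }
static void wr_intr(uint32_t v) { fake_intr &= ~v; }    /* write-1-to-ack */

int main(void)
{
        uint32_t status;

        while ((status = rd_intr())) {
                if (status & 0x00000001) {      /* e.g. a NOTIFY-style bit */
                        /* ...handle it... */
                        status &= ~0x00000001;
                        wr_intr(0x00000001);
                }
                if (status & 0x00000010) {      /* e.g. an error-style bit */
                        status &= ~0x00000010;
                        wr_intr(0x00000010);
                }
                if (status) {   /* unclaimed bits: log, then ack anyway */
                        printf("unhandled 0x%08x\n", (unsigned)status);
                        wr_intr(status);
                }
        }
        return 0;
}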
1038 | |||
1039 | /* There must be a *lot* of these. Will take some time to gather them up. */ | ||
1040 | static struct nouveau_enum_names nv50_data_error_names[] = | ||
1041 | { | ||
1042 | { 4, "INVALID_VALUE" }, | ||
1043 | { 5, "INVALID_ENUM" }, | ||
1044 | { 8, "INVALID_OBJECT" }, | ||
1045 | { 0xc, "INVALID_BITFIELD" }, | ||
1046 | { 0x28, "MP_NO_REG_SPACE" }, | ||
1047 | { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, | ||
1048 | }; | ||
1049 | |||
1050 | static void | ||
1051 | nv50_pgraph_irq_handler(struct drm_device *dev) | ||
1052 | { | ||
1053 | struct nouveau_pgraph_trap trap; | ||
1054 | int unhandled = 0; | ||
1055 | uint32_t status; | ||
1056 | |||
1057 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { | ||
1058 | /* NOTIFY: You've set a NOTIFY on a command and it's done. */ | ||
1059 | if (status & 0x00000001) { | ||
1060 | nouveau_graph_trap_info(dev, &trap); | ||
1061 | if (nouveau_ratelimit()) | ||
1062 | nouveau_graph_dump_trap_info(dev, | ||
1063 | "PGRAPH_NOTIFY", &trap); | ||
1064 | status &= ~0x00000001; | ||
1065 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); | ||
1066 | } | ||
1067 | |||
1068 | /* COMPUTE_QUERY: Purpose and exact cause unknown, happens | ||
1069 | * when you write 0x200 to 0x50c0 method 0x31c. */ | ||
1070 | if (status & 0x00000002) { | ||
1071 | nouveau_graph_trap_info(dev, &trap); | ||
1072 | if (nouveau_ratelimit()) | ||
1073 | nouveau_graph_dump_trap_info(dev, | ||
1074 | "PGRAPH_COMPUTE_QUERY", &trap); | ||
1075 | status &= ~0x00000002; | ||
1076 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); | ||
1077 | } | ||
1078 | |||
1079 | /* Unknown, never seen: 0x4 */ | ||
1080 | |||
1081 | /* ILLEGAL_MTHD: You used a wrong method for this class. */ | ||
1082 | if (status & 0x00000010) { | ||
1083 | nouveau_graph_trap_info(dev, &trap); | ||
1084 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | ||
1085 | unhandled = 1; | ||
1086 | if (unhandled && nouveau_ratelimit()) | ||
1087 | nouveau_graph_dump_trap_info(dev, | ||
1088 | "PGRAPH_ILLEGAL_MTHD", &trap); | ||
1089 | status &= ~0x00000010; | ||
1090 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); | ||
1091 | } | ||
1092 | |||
1093 | /* ILLEGAL_CLASS: You used a wrong class. */ | ||
1094 | if (status & 0x00000020) { | ||
1095 | nouveau_graph_trap_info(dev, &trap); | ||
1096 | if (nouveau_ratelimit()) | ||
1097 | nouveau_graph_dump_trap_info(dev, | ||
1098 | "PGRAPH_ILLEGAL_CLASS", &trap); | ||
1099 | status &= ~0x00000020; | ||
1100 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); | ||
1101 | } | ||
1102 | |||
1103 | /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */ | ||
1104 | if (status & 0x00000040) { | ||
1105 | nouveau_graph_trap_info(dev, &trap); | ||
1106 | if (nouveau_ratelimit()) | ||
1107 | nouveau_graph_dump_trap_info(dev, | ||
1108 | "PGRAPH_DOUBLE_NOTIFY", &trap); | ||
1109 | status &= ~0x00000040; | ||
1110 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); | ||
1111 | } | ||
1112 | |||
1113 | /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ | ||
1114 | if (status & 0x00001000) { | ||
1115 | nv_wr32(dev, 0x400500, 0x00000000); | ||
1116 | nv_wr32(dev, NV03_PGRAPH_INTR, | ||
1117 | NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
1118 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, | ||
1119 | NV40_PGRAPH_INTR_EN) & | ||
1120 | ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
1121 | nv_wr32(dev, 0x400500, 0x00010001); | ||
1122 | |||
1123 | nv50_graph_context_switch(dev); | ||
1124 | |||
1125 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | ||
1126 | } | ||
1127 | |||
1128 | /* BUFFER_NOTIFY: Your m2mf transfer finished */ | ||
1129 | if (status & 0x00010000) { | ||
1130 | nouveau_graph_trap_info(dev, &trap); | ||
1131 | if (nouveau_ratelimit()) | ||
1132 | nouveau_graph_dump_trap_info(dev, | ||
1133 | "PGRAPH_BUFFER_NOTIFY", &trap); | ||
1134 | status &= ~0x00010000; | ||
1135 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); | ||
1136 | } | ||
1137 | |||
1138 | /* DATA_ERROR: Invalid value for this method, or invalid | ||
1139 | * state in current PGRAPH context for this operation */ | ||
1140 | if (status & 0x00100000) { | ||
1141 | nouveau_graph_trap_info(dev, &trap); | ||
1142 | if (nouveau_ratelimit()) { | ||
1143 | nouveau_graph_dump_trap_info(dev, | ||
1144 | "PGRAPH_DATA_ERROR", &trap); | ||
1145 | NV_INFO (dev, "PGRAPH_DATA_ERROR - "); | ||
1146 | nouveau_print_enum_names(nv_rd32(dev, 0x400110), | ||
1147 | nv50_data_error_names); | ||
1148 | printk("\n"); | ||
1149 | } | ||
1150 | status &= ~0x00100000; | ||
1151 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); | ||
1152 | } | ||
1153 | 124 | ||
1154 | /* TRAP: Something bad happened in the middle of command | 125 | drm_irq_uninstall(dev); |
1155 | * execution. Has a billion types, subtypes, and even | 126 | if (dev_priv->msi_enabled) |
1156 | * subsubtypes. */ | 127 | pci_disable_msi(dev->pdev); |
1157 | if (status & 0x00200000) { | ||
1158 | nv50_pgraph_trap_handler(dev); | ||
1159 | status &= ~0x00200000; | ||
1160 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | ||
1161 | } | ||
1162 | |||
1163 | /* Unknown, never seen: 0x00400000 */ | ||
1164 | |||
1165 | /* SINGLE_STEP: Happens on every method if you turned on | ||
1166 | * single stepping in 40008c */ | ||
1167 | if (status & 0x01000000) { | ||
1168 | nouveau_graph_trap_info(dev, &trap); | ||
1169 | if (nouveau_ratelimit()) | ||
1170 | nouveau_graph_dump_trap_info(dev, | ||
1171 | "PGRAPH_SINGLE_STEP", &trap); | ||
1172 | status &= ~0x01000000; | ||
1173 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); | ||
1174 | } | ||
1175 | |||
1176 | /* 0x02000000 happens when you pause a ctxprog... | ||
1177 | * but the only way this can happen, as far as I know, is by | ||
1178 | * poking the relevant MMIO register, and we don't | ||
1179 | * do that. */ | ||
1180 | |||
1181 | if (status) { | ||
1182 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", | ||
1183 | status); | ||
1184 | nv_wr32(dev, NV03_PGRAPH_INTR, status); | ||
1185 | } | ||
1186 | |||
1187 | { | ||
1188 | const int isb = (1 << 16) | (1 << 0); | ||
1189 | |||
1190 | if ((nv_rd32(dev, 0x400500) & isb) != isb) | ||
1191 | nv_wr32(dev, 0x400500, | ||
1192 | nv_rd32(dev, 0x400500) | isb); | ||
1193 | } | ||
1194 | } | ||
1195 | |||
1196 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | ||
1197 | if (nv_rd32(dev, 0x400824) & (1 << 31)) | ||
1198 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
1199 | } | 128 | } |
1200 | 129 | ||
1201 | static void | 130 | void |
1202 | nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) | 131 | nouveau_irq_register(struct drm_device *dev, int status_bit, |
132 | void (*handler)(struct drm_device *)) | ||
1203 | { | 133 | { |
1204 | if (crtc & 1) | 134 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1205 | nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); | 135 | unsigned long flags; |
1206 | 136 | ||
1207 | if (crtc & 2) | 137 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
1208 | nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); | 138 | dev_priv->irq_handler[status_bit] = handler; |
139 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
1209 | } | 140 | } |
1210 | 141 | ||
1211 | irqreturn_t | 142 | void |
1212 | nouveau_irq_handler(DRM_IRQ_ARGS) | 143 | nouveau_irq_unregister(struct drm_device *dev, int status_bit) |
1213 | { | 144 | { |
1214 | struct drm_device *dev = (struct drm_device *)arg; | ||
1215 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 145 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1216 | uint32_t status; | ||
1217 | unsigned long flags; | 146 | unsigned long flags; |
1218 | 147 | ||
1219 | status = nv_rd32(dev, NV03_PMC_INTR_0); | ||
1220 | if (!status) | ||
1221 | return IRQ_NONE; | ||
1222 | |||
1223 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 148 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
1224 | 149 | dev_priv->irq_handler[status_bit] = NULL; | |
1225 | if (status & NV_PMC_INTR_0_PFIFO_PENDING) { | ||
1226 | nouveau_fifo_irq_handler(dev); | ||
1227 | status &= ~NV_PMC_INTR_0_PFIFO_PENDING; | ||
1228 | } | ||
1229 | |||
1230 | if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { | ||
1231 | if (dev_priv->card_type >= NV_50) | ||
1232 | nv50_pgraph_irq_handler(dev); | ||
1233 | else | ||
1234 | nouveau_pgraph_irq_handler(dev); | ||
1235 | |||
1236 | status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; | ||
1237 | } | ||
1238 | |||
1239 | if (status & NV_PMC_INTR_0_CRTCn_PENDING) { | ||
1240 | nouveau_crtc_irq_handler(dev, (status>>24)&3); | ||
1241 | status &= ~NV_PMC_INTR_0_CRTCn_PENDING; | ||
1242 | } | ||
1243 | |||
1244 | if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING | | ||
1245 | NV_PMC_INTR_0_NV50_I2C_PENDING)) { | ||
1246 | nv50_display_irq_handler(dev); | ||
1247 | status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING | | ||
1248 | NV_PMC_INTR_0_NV50_I2C_PENDING); | ||
1249 | } | ||
1250 | |||
1251 | if (status) | ||
1252 | NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); | ||
1253 | |||
1254 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 150 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
1255 | |||
1256 | return IRQ_HANDLED; | ||
1257 | } | 151 | } |
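
The rewritten PMC handler above replaces the hard-wired per-engine calls with a 32-slot table indexed by status bit, which nouveau_irq_register()/nouveau_irq_unregister() manipulate under the context-switch lock. A user-space model of that dispatch follows; locking and the MSI rearm are omitted, and the handler names and bit numbers are invented for illustration.

#include <stdio.h>
#include <stdint.h>

static void (*irq_handler[32])(void);

static void fifo_handler(void)   { puts("PFIFO");  }
static void pgraph_handler(void) { puts("PGRAPH"); }

static void register_irq(int bit, void (*fn)(void))
{
        irq_handler[bit] = fn;
}

static void dispatch(uint32_t stat)
{
        int i;

        for (i = 0; i < 32 && stat; i++) {
                if (!(stat & (1u << i)) || !irq_handler[i])
                        continue;
                irq_handler[i]();       /* per-bit engine handler */
                stat &= ~(1u << i);
        }
        if (stat)       /* bits with no registered handler */
                printf("unhandled INTR 0x%08x\n", (unsigned)stat);
}

int main(void)
{
        register_irq(8, fifo_handler);          /* bit numbers illustrative */
        register_irq(12, pgraph_handler);
        dispatch((1u << 8) | (1u << 12) | (1u << 30));
        return 0;
}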
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index fe4a30dc4b42..224181193a1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -36,183 +36,112 @@ | |||
36 | 36 | ||
37 | #include "nouveau_drv.h" | 37 | #include "nouveau_drv.h" |
38 | #include "nouveau_pm.h" | 38 | #include "nouveau_pm.h" |
39 | #include "nouveau_mm.h" | ||
40 | #include "nouveau_vm.h" | ||
39 | 41 | ||
40 | /* | 42 | /* |
41 | * NV10-NV40 tiling helpers | 43 | * NV10-NV40 tiling helpers |
42 | */ | 44 | */ |
43 | 45 | ||
44 | static void | 46 | static void |
45 | nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | 47 | nv10_mem_update_tile_region(struct drm_device *dev, |
46 | uint32_t size, uint32_t pitch) | 48 | struct nouveau_tile_reg *tile, uint32_t addr, |
49 | uint32_t size, uint32_t pitch, uint32_t flags) | ||
47 | { | 50 | { |
48 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 51 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
49 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 52 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; |
50 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | 53 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
51 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 54 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
52 | struct nouveau_tile_reg *tile = &dev_priv->tile[i]; | 55 | int i = tile - dev_priv->tile.reg; |
56 | unsigned long save; | ||
53 | 57 | ||
54 | tile->addr = addr; | 58 | nouveau_fence_unref(&tile->fence); |
55 | tile->size = size; | ||
56 | tile->used = !!pitch; | ||
57 | nouveau_fence_unref((void **)&tile->fence); | ||
58 | 59 | ||
60 | if (tile->pitch) | ||
61 | pfb->free_tile_region(dev, i); | ||
62 | |||
63 | if (pitch) | ||
64 | pfb->init_tile_region(dev, i, addr, size, pitch, flags); | ||
65 | |||
66 | spin_lock_irqsave(&dev_priv->context_switch_lock, save); | ||
59 | pfifo->reassign(dev, false); | 67 | pfifo->reassign(dev, false); |
60 | pfifo->cache_pull(dev, false); | 68 | pfifo->cache_pull(dev, false); |
61 | 69 | ||
62 | nouveau_wait_for_idle(dev); | 70 | nouveau_wait_for_idle(dev); |
63 | 71 | ||
64 | pgraph->set_region_tiling(dev, i, addr, size, pitch); | 72 | pfb->set_tile_region(dev, i); |
65 | pfb->set_region_tiling(dev, i, addr, size, pitch); | 73 | pgraph->set_tile_region(dev, i); |
66 | 74 | ||
67 | pfifo->cache_pull(dev, true); | 75 | pfifo->cache_pull(dev, true); |
68 | pfifo->reassign(dev, true); | 76 | pfifo->reassign(dev, true); |
77 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); | ||
69 | } | 78 | } |
70 | 79 | ||
71 | struct nouveau_tile_reg * | 80 | static struct nouveau_tile_reg * |
72 | nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, | 81 | nv10_mem_get_tile_region(struct drm_device *dev, int i) |
73 | uint32_t pitch) | ||
74 | { | 82 | { |
75 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 83 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
76 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | 84 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
77 | struct nouveau_tile_reg *found = NULL; | ||
78 | unsigned long i, flags; | ||
79 | |||
80 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
81 | |||
82 | for (i = 0; i < pfb->num_tiles; i++) { | ||
83 | struct nouveau_tile_reg *tile = &dev_priv->tile[i]; | ||
84 | |||
85 | if (tile->used) | ||
86 | /* Tile region in use. */ | ||
87 | continue; | ||
88 | 85 | ||
89 | if (tile->fence && | 86 | spin_lock(&dev_priv->tile.lock); |
90 | !nouveau_fence_signalled(tile->fence, NULL)) | ||
91 | /* Pending tile region. */ | ||
92 | continue; | ||
93 | |||
94 | if (max(tile->addr, addr) < | ||
95 | min(tile->addr + tile->size, addr + size)) | ||
96 | /* Kill an intersecting tile region. */ | ||
97 | nv10_mem_set_region_tiling(dev, i, 0, 0, 0); | ||
98 | |||
99 | if (pitch && !found) { | ||
100 | /* Free tile region. */ | ||
101 | nv10_mem_set_region_tiling(dev, i, addr, size, pitch); | ||
102 | found = tile; | ||
103 | } | ||
104 | } | ||
105 | 87 | ||
106 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 88 | if (!tile->used && |
89 | (!tile->fence || nouveau_fence_signalled(tile->fence))) | ||
90 | tile->used = true; | ||
91 | else | ||
92 | tile = NULL; | ||
107 | 93 | ||
108 | return found; | 94 | spin_unlock(&dev_priv->tile.lock); |
95 | return tile; | ||
109 | } | 96 | } |
110 | 97 | ||
111 | void | 98 | void |
112 | nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, | 99 | nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, |
113 | struct nouveau_fence *fence) | 100 | struct nouveau_fence *fence) |
114 | { | ||
115 | if (fence) { | ||
116 | /* Mark it as pending. */ | ||
117 | tile->fence = fence; | ||
118 | nouveau_fence_ref(fence); | ||
119 | } | ||
120 | |||
121 | tile->used = false; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * NV50 VM helpers | ||
126 | */ | ||
127 | int | ||
128 | nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | ||
129 | uint32_t flags, uint64_t phys) | ||
130 | { | 101 | { |
131 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 102 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
132 | struct nouveau_gpuobj *pgt; | ||
133 | unsigned block; | ||
134 | int i; | ||
135 | 103 | ||
136 | virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; | 104 | if (tile) { |
137 | size = (size >> 16) << 1; | 105 | spin_lock(&dev_priv->tile.lock); |
138 | 106 | if (fence) { | |
139 | phys |= ((uint64_t)flags << 32); | 107 | /* Mark it as pending. */ |
140 | phys |= 1; | 108 | tile->fence = fence; |
141 | if (dev_priv->vram_sys_base) { | 109 | nouveau_fence_ref(fence); |
142 | phys += dev_priv->vram_sys_base; | ||
143 | phys |= 0x30; | ||
144 | } | ||
145 | |||
146 | while (size) { | ||
147 | unsigned offset_h = upper_32_bits(phys); | ||
148 | unsigned offset_l = lower_32_bits(phys); | ||
149 | unsigned pte, end; | ||
150 | |||
151 | for (i = 7; i >= 0; i--) { | ||
152 | block = 1 << (i + 1); | ||
153 | if (size >= block && !(virt & (block - 1))) | ||
154 | break; | ||
155 | } | 110 | } |
156 | offset_l |= (i << 7); | ||
157 | |||
158 | phys += block << 15; | ||
159 | size -= block; | ||
160 | |||
161 | while (block) { | ||
162 | pgt = dev_priv->vm_vram_pt[virt >> 14]; | ||
163 | pte = virt & 0x3ffe; | ||
164 | |||
165 | end = pte + block; | ||
166 | if (end > 16384) | ||
167 | end = 16384; | ||
168 | block -= (end - pte); | ||
169 | virt += (end - pte); | ||
170 | |||
171 | while (pte < end) { | ||
172 | nv_wo32(pgt, (pte * 4) + 0, offset_l); | ||
173 | nv_wo32(pgt, (pte * 4) + 4, offset_h); | ||
174 | pte += 2; | ||
175 | } | ||
176 | } | ||
177 | } | ||
178 | 111 | ||
179 | dev_priv->engine.instmem.flush(dev); | 112 | tile->used = false; |
180 | dev_priv->engine.fifo.tlb_flush(dev); | 113 | spin_unlock(&dev_priv->tile.lock); |
181 | dev_priv->engine.graph.tlb_flush(dev); | 114 | } |
182 | nv50_vm_flush(dev, 6); | ||
183 | return 0; | ||
184 | } | 115 | } |
185 | 116 | ||
186 | void | 117 | struct nouveau_tile_reg * |
187 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) | 118 | nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, |
119 | uint32_t pitch, uint32_t flags) | ||
188 | { | 120 | { |
189 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 121 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
190 | struct nouveau_gpuobj *pgt; | 122 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
191 | unsigned pages, pte, end; | 123 | struct nouveau_tile_reg *tile, *found = NULL; |
192 | 124 | int i; | |
193 | virt -= dev_priv->vm_vram_base; | ||
194 | pages = (size >> 16) << 1; | ||
195 | 125 | ||
196 | while (pages) { | 126 | for (i = 0; i < pfb->num_tiles; i++) { |
197 | pgt = dev_priv->vm_vram_pt[virt >> 29]; | 127 | tile = nv10_mem_get_tile_region(dev, i); |
198 | pte = (virt & 0x1ffe0000ULL) >> 15; | ||
199 | 128 | ||
200 | end = pte + pages; | 129 | if (pitch && !found) { |
201 | if (end > 16384) | 130 | found = tile; |
202 | end = 16384; | 131 | continue; |
203 | pages -= (end - pte); | ||
204 | virt += (end - pte) << 15; | ||
205 | 132 | ||
206 | while (pte < end) { | 133 | } else if (tile && tile->pitch) { |
207 | nv_wo32(pgt, (pte * 4), 0); | 134 | /* Kill an unused tile region. */ |
208 | pte++; | 135 | nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0); |
209 | } | 136 | } |
137 | |||
138 | nv10_mem_put_tile_region(dev, tile, NULL); | ||
210 | } | 139 | } |
211 | 140 | ||
212 | dev_priv->engine.instmem.flush(dev); | 141 | if (found) |
213 | dev_priv->engine.fifo.tlb_flush(dev); | 142 | nv10_mem_update_tile_region(dev, found, addr, size, |
214 | dev_priv->engine.graph.tlb_flush(dev); | 143 | pitch, flags); |
215 | nv50_vm_flush(dev, 6); | 144 | return found; |
216 | } | 145 | } |
217 | 146 | ||
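
The tile-region rework above splits the old monolithic nv10_mem_set_tiling() into a get/update/put lifecycle: a slot may be handed out only when it is unused and any fence on it has signalled, and put() returns it with an optional fence that keeps it pending until the GPU is done with it. A simplified single-threaded model of that state machine, with the fence reduced to a boolean:

#include <stdio.h>
#include <stdbool.h>

struct tile {
        bool used;
        bool fence_pending;     /* stands in for a nouveau_fence */
};

static struct tile *tile_get(struct tile *t)
{
        if (!t->used && !t->fence_pending) {
                t->used = true;
                return t;
        }
        return NULL;    /* busy, or still fenced */
}

static void tile_put(struct tile *t, bool fence)
{
        t->fence_pending = fence;       /* reuse deferred until it signals */
        t->used = false;
}

int main(void)
{
        struct tile t = { false, false };

        if (tile_get(&t))
                puts("acquired");
        tile_put(&t, true);
        if (!tile_get(&t))
                puts("still pending on fence");
        t.fence_pending = false;        /* fence signalled */
        if (tile_get(&t))
                puts("reusable again");
        return 0;
}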
218 | /* | 147 | /* |
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev) | |||
312 | return 0; | 241 | return 0; |
313 | } | 242 | } |
314 | 243 | ||
315 | static void | 244 | int |
316 | nv50_vram_preinit(struct drm_device *dev) | ||
317 | { | ||
318 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
319 | int i, parts, colbits, rowbitsa, rowbitsb, banks; | ||
320 | u64 rowsize, predicted; | ||
321 | u32 r0, r4, rt, ru; | ||
322 | |||
323 | r0 = nv_rd32(dev, 0x100200); | ||
324 | r4 = nv_rd32(dev, 0x100204); | ||
325 | rt = nv_rd32(dev, 0x100250); | ||
326 | ru = nv_rd32(dev, 0x001540); | ||
327 | NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); | ||
328 | |||
329 | for (i = 0, parts = 0; i < 8; i++) { | ||
330 | if (ru & (0x00010000 << i)) | ||
331 | parts++; | ||
332 | } | ||
333 | |||
334 | colbits = (r4 & 0x0000f000) >> 12; | ||
335 | rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; | ||
336 | rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; | ||
337 | banks = ((r4 & 0x01000000) ? 8 : 4); | ||
338 | |||
339 | rowsize = parts * banks * (1 << colbits) * 8; | ||
340 | predicted = rowsize << rowbitsa; | ||
341 | if (r0 & 0x00000004) | ||
342 | predicted += rowsize << rowbitsb; | ||
343 | |||
344 | if (predicted != dev_priv->vram_size) { | ||
345 | NV_WARN(dev, "memory controller reports %dMiB VRAM\n", | ||
346 | (u32)(dev_priv->vram_size >> 20)); | ||
347 | NV_WARN(dev, "we calculated %dMiB VRAM\n", | ||
348 | (u32)(predicted >> 20)); | ||
349 | } | ||
350 | |||
351 | dev_priv->vram_rblock_size = rowsize >> 12; | ||
352 | if (rt & 1) | ||
353 | dev_priv->vram_rblock_size *= 3; | ||
354 | |||
355 | NV_DEBUG(dev, "rblock %lld bytes\n", | ||
356 | (u64)dev_priv->vram_rblock_size << 12); | ||
357 | } | ||
358 | |||
359 | static void | ||
360 | nvaa_vram_preinit(struct drm_device *dev) | ||
361 | { | ||
362 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
363 | |||
364 | /* To our knowledge, there's no large scale reordering of pages | ||
365 | * that occurs on IGP chipsets. | ||
366 | */ | ||
367 | dev_priv->vram_rblock_size = 1; | ||
368 | } | ||
369 | |||
370 | static int | ||
371 | nouveau_mem_detect(struct drm_device *dev) | 245 | nouveau_mem_detect(struct drm_device *dev) |
372 | { | 246 | { |
373 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 247 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -381,40 +255,25 @@ nouveau_mem_detect(struct drm_device *dev) | |||
381 | if (dev_priv->card_type < NV_50) { | 255 | if (dev_priv->card_type < NV_50) { |
382 | dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); | 256 | dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); |
383 | dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; | 257 | dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; |
384 | } else | ||
385 | if (dev_priv->card_type < NV_C0) { | ||
386 | dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); | ||
387 | dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; | ||
388 | dev_priv->vram_size &= 0xffffffff00ll; | ||
389 | |||
390 | switch (dev_priv->chipset) { | ||
391 | case 0xaa: | ||
392 | case 0xac: | ||
393 | case 0xaf: | ||
394 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); | ||
395 | dev_priv->vram_sys_base <<= 12; | ||
396 | nvaa_vram_preinit(dev); | ||
397 | break; | ||
398 | default: | ||
399 | nv50_vram_preinit(dev); | ||
400 | break; | ||
401 | } | ||
402 | } else { | 258 | } else { |
403 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; | 259 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; |
404 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); | 260 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); |
405 | } | 261 | } |
406 | 262 | ||
407 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | ||
408 | if (dev_priv->vram_sys_base) { | ||
409 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | ||
410 | dev_priv->vram_sys_base); | ||
411 | } | ||
412 | |||
413 | if (dev_priv->vram_size) | 263 | if (dev_priv->vram_size) |
414 | return 0; | 264 | return 0; |
415 | return -ENOMEM; | 265 | return -ENOMEM; |
416 | } | 266 | } |
417 | 267 | ||
268 | bool | ||
269 | nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) | ||
270 | { | ||
271 | if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) | ||
272 | return true; | ||
273 | |||
274 | return false; | ||
275 | } | ||
276 | |||
418 | #if __OS_HAS_AGP | 277 | #if __OS_HAS_AGP |
419 | static unsigned long | 278 | static unsigned long |
420 | get_agp_mode(struct drm_device *dev, unsigned long mode) | 279 | get_agp_mode(struct drm_device *dev, unsigned long mode) |
@@ -547,10 +406,6 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
547 | if (ret) | 406 | if (ret) |
548 | return ret; | 407 | return ret; |
549 | 408 | ||
550 | ret = nouveau_mem_detect(dev); | ||
551 | if (ret) | ||
552 | return ret; | ||
553 | |||
554 | dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); | 409 | dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); |
555 | 410 | ||
556 | ret = nouveau_ttm_global_init(dev_priv); | 411 | ret = nouveau_ttm_global_init(dev_priv); |
@@ -566,13 +421,6 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
566 | return ret; | 421 | return ret; |
567 | } | 422 | } |
568 | 423 | ||
569 | dev_priv->fb_available_size = dev_priv->vram_size; | ||
570 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; | ||
571 | if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) | ||
572 | dev_priv->fb_mappable_pages = | ||
573 | pci_resource_len(dev->pdev, 1); | ||
574 | dev_priv->fb_mappable_pages >>= PAGE_SHIFT; | ||
575 | |||
576 | /* reserve space at end of VRAM for PRAMIN */ | 424 | /* reserve space at end of VRAM for PRAMIN */ |
577 | if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || | 425 | if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || |
578 | dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) | 426 | dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) |
@@ -583,6 +431,22 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
583 | else | 431 | else |
584 | dev_priv->ramin_rsvd_vram = (512 * 1024); | 432 | dev_priv->ramin_rsvd_vram = (512 * 1024); |
585 | 433 | ||
434 | ret = dev_priv->engine.vram.init(dev); | ||
435 | if (ret) | ||
436 | return ret; | ||
437 | |||
438 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | ||
439 | if (dev_priv->vram_sys_base) { | ||
440 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | ||
441 | dev_priv->vram_sys_base); | ||
442 | } | ||
443 | |||
444 | dev_priv->fb_available_size = dev_priv->vram_size; | ||
445 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; | ||
446 | if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) | ||
447 | dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1); | ||
448 | dev_priv->fb_mappable_pages >>= PAGE_SHIFT; | ||
449 | |||
586 | dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; | 450 | dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; |
587 | dev_priv->fb_aper_free = dev_priv->fb_available_size; | 451 | dev_priv->fb_aper_free = dev_priv->fb_available_size; |
588 | 452 | ||
@@ -799,3 +663,114 @@ nouveau_mem_timing_fini(struct drm_device *dev) | |||
799 | 663 | ||
800 | kfree(mem->timing); | 664 | kfree(mem->timing); |
801 | } | 665 | } |
666 | |||
667 | static int | ||
668 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) | ||
669 | { | ||
670 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | ||
671 | struct nouveau_mm *mm; | ||
672 | u32 b_size; | ||
673 | int ret; | ||
674 | |||
675 | p_size = (p_size << PAGE_SHIFT) >> 12; | ||
676 | b_size = dev_priv->vram_rblock_size >> 12; | ||
677 | |||
678 | ret = nouveau_mm_init(&mm, 0, p_size, b_size); | ||
679 | if (ret) | ||
680 | return ret; | ||
681 | |||
682 | man->priv = mm; | ||
683 | return 0; | ||
684 | } | ||
685 | |||
686 | static int | ||
687 | nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) | ||
688 | { | ||
689 | struct nouveau_mm *mm = man->priv; | ||
690 | int ret; | ||
691 | |||
692 | ret = nouveau_mm_fini(&mm); | ||
693 | if (ret) | ||
694 | return ret; | ||
695 | |||
696 | man->priv = NULL; | ||
697 | return 0; | ||
698 | } | ||
699 | |||
700 | static void | ||
701 | nouveau_vram_manager_del(struct ttm_mem_type_manager *man, | ||
702 | struct ttm_mem_reg *mem) | ||
703 | { | ||
704 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | ||
705 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
706 | struct drm_device *dev = dev_priv->dev; | ||
707 | |||
708 | vram->put(dev, (struct nouveau_vram **)&mem->mm_node); | ||
709 | } | ||
710 | |||
711 | static int | ||
712 | nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | ||
713 | struct ttm_buffer_object *bo, | ||
714 | struct ttm_placement *placement, | ||
715 | struct ttm_mem_reg *mem) | ||
716 | { | ||
717 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | ||
718 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
719 | struct drm_device *dev = dev_priv->dev; | ||
720 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
721 | struct nouveau_vram *node; | ||
722 | u32 size_nc = 0; | ||
723 | int ret; | ||
724 | |||
725 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) | ||
726 | size_nc = 1 << nvbo->vma.node->type; | ||
727 | |||
728 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, | ||
729 | mem->page_alignment << PAGE_SHIFT, size_nc, | ||
730 | (nvbo->tile_flags >> 8) & 0xff, &node); | ||
731 | if (ret) | ||
732 | return ret; | ||
733 | |||
734 | mem->mm_node = node; | ||
735 | mem->start = node->offset >> PAGE_SHIFT; | ||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | void | ||
740 | nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) | ||
741 | { | ||
742 | struct nouveau_mm *mm = man->priv; | ||
743 | struct nouveau_mm_node *r; | ||
744 | u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; | ||
745 | int i; | ||
746 | |||
747 | mutex_lock(&mm->mutex); | ||
748 | list_for_each_entry(r, &mm->nodes, nl_entry) { | ||
749 | printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", | ||
750 | prefix, r->free ? "free" : "used", r->type, | ||
751 | ((u64)r->offset << 12), | ||
752 | (((u64)r->offset + r->length) << 12)); | ||
753 | total += r->length; | ||
754 | ttotal[r->type] += r->length; | ||
755 | if (r->free) | ||
756 | tfree[r->type] += r->length; | ||
757 | else | ||
758 | tused[r->type] += r->length; | ||
759 | } | ||
760 | mutex_unlock(&mm->mutex); | ||
761 | |||
762 | printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); | ||
763 | for (i = 0; i < 3; i++) { | ||
764 | printk(KERN_DEBUG "%s type %d: 0x%010llx, " | ||
765 | "used 0x%010llx, free 0x%010llx\n", prefix, | ||
766 | i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12); | ||
767 | } | ||
768 | } | ||
769 | |||
770 | const struct ttm_mem_type_manager_func nouveau_vram_manager = { | ||
771 | nouveau_vram_manager_init, | ||
772 | nouveau_vram_manager_fini, | ||
773 | nouveau_vram_manager_new, | ||
774 | nouveau_vram_manager_del, | ||
775 | nouveau_vram_manager_debug | ||
776 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c new file mode 100644 index 000000000000..cdbb11eb701b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | |||
29 | static inline void | ||
30 | region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) | ||
31 | { | ||
32 | list_del(&a->nl_entry); | ||
33 | list_del(&a->fl_entry); | ||
34 | kfree(a); | ||
35 | } | ||
36 | |||
37 | static struct nouveau_mm_node * | ||
38 | region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) | ||
39 | { | ||
40 | struct nouveau_mm_node *b; | ||
41 | |||
42 | if (a->length == size) | ||
43 | return a; | ||
44 | |||
45 | b = kmalloc(sizeof(*b), GFP_KERNEL); | ||
46 | if (unlikely(b == NULL)) | ||
47 | return NULL; | ||
48 | |||
49 | b->offset = a->offset; | ||
50 | b->length = size; | ||
51 | b->free = a->free; | ||
52 | b->type = a->type; | ||
53 | a->offset += size; | ||
54 | a->length -= size; | ||
55 | list_add_tail(&b->nl_entry, &a->nl_entry); | ||
56 | if (b->free) | ||
57 | list_add_tail(&b->fl_entry, &a->fl_entry); | ||
58 | return b; | ||
59 | } | ||
60 | |||
61 | static struct nouveau_mm_node * | ||
62 | nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) | ||
63 | { | ||
64 | struct nouveau_mm_node *prev, *next; | ||
65 | |||
66 | /* try to merge with free adjacent entries of same type */ | ||
67 | prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry); | ||
68 | if (this->nl_entry.prev != &rmm->nodes) { | ||
69 | if (prev->free && prev->type == this->type) { | ||
70 | prev->length += this->length; | ||
71 | region_put(rmm, this); | ||
72 | this = prev; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); | ||
77 | if (this->nl_entry.next != &rmm->nodes) { | ||
78 | if (next->free && next->type == this->type) { | ||
79 | next->offset = this->offset; | ||
80 | next->length += this->length; | ||
81 | region_put(rmm, this); | ||
82 | this = next; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | return this; | ||
87 | } | ||
88 | |||
89 | void | ||
90 | nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) | ||
91 | { | ||
92 | u32 block_s, block_l; | ||
93 | |||
94 | this->free = true; | ||
95 | list_add(&this->fl_entry, &rmm->free); | ||
96 | this = nouveau_mm_merge(rmm, this); | ||
97 | |||
98 | /* any entirely free blocks now? we'll want to remove typing | ||
99 | * on them now so they can be used for any memory allocation | ||
100 | */ | ||
101 | block_s = roundup(this->offset, rmm->block_size); | ||
102 | if (block_s + rmm->block_size > this->offset + this->length) | ||
103 | return; | ||
104 | |||
105 | /* split off any still-typed region at the start */ | ||
106 | if (block_s != this->offset) { | ||
107 | if (!region_split(rmm, this, block_s - this->offset)) | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | /* split off the soon-to-be-untyped block(s) */ | ||
112 | block_l = rounddown(this->length, rmm->block_size); | ||
113 | if (block_l != this->length) { | ||
114 | this = region_split(rmm, this, block_l); | ||
115 | if (!this) | ||
116 | return; | ||
117 | } | ||
118 | |||
119 | /* mark as having no type, and retry merge with any adjacent | ||
120 | * untyped blocks | ||
121 | */ | ||
122 | this->type = 0; | ||
123 | nouveau_mm_merge(rmm, this); | ||
124 | } | ||
125 | |||
126 | int | ||
127 | nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | ||
128 | u32 align, struct nouveau_mm_node **pnode) | ||
129 | { | ||
130 | struct nouveau_mm_node *this, *tmp, *next; | ||
131 | u32 splitoff, avail, alloc; | ||
132 | |||
133 | list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { | ||
134 | next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); | ||
135 | if (this->nl_entry.next == &rmm->nodes) | ||
136 | next = NULL; | ||
137 | |||
138 | /* skip wrongly typed blocks */ | ||
139 | if (this->type && this->type != type) | ||
140 | continue; | ||
141 | |||
142 | /* account for alignment */ | ||
143 | splitoff = this->offset & (align - 1); | ||
144 | if (splitoff) | ||
145 | splitoff = align - splitoff; | ||
146 | |||
147 | if (this->length <= splitoff) | ||
148 | continue; | ||
149 | |||
150 | /* determine total memory available from this, and | ||
151 | * the next block (if appropriate) | ||
152 | */ | ||
153 | avail = this->length; | ||
154 | if (next && next->free && (!next->type || next->type == type)) | ||
155 | avail += next->length; | ||
156 | |||
157 | avail -= splitoff; | ||
158 | |||
159 | /* determine allocation size */ | ||
160 | if (size_nc) { | ||
161 | alloc = min(avail, size); | ||
162 | alloc = rounddown(alloc, size_nc); | ||
163 | if (alloc == 0) | ||
164 | continue; | ||
165 | } else { | ||
166 | alloc = size; | ||
167 | if (avail < alloc) | ||
168 | continue; | ||
169 | } | ||
170 | |||
171 | /* untyped block, split off a chunk that's a multiple | ||
172 | * of block_size and type it | ||
173 | */ | ||
174 | if (!this->type) { | ||
175 | u32 block = roundup(alloc + splitoff, rmm->block_size); | ||
176 | if (this->length < block) | ||
177 | continue; | ||
178 | |||
179 | this = region_split(rmm, this, block); | ||
180 | if (!this) | ||
181 | return -ENOMEM; | ||
182 | |||
183 | this->type = type; | ||
184 | } | ||
185 | |||
186 | /* stealing memory from adjacent block */ | ||
187 | if (alloc > this->length) { | ||
188 | u32 amount = alloc - (this->length - splitoff); | ||
189 | |||
190 | if (!next->type) { | ||
191 | amount = roundup(amount, rmm->block_size); | ||
192 | |||
193 | next = region_split(rmm, next, amount); | ||
194 | if (!next) | ||
195 | return -ENOMEM; | ||
196 | |||
197 | next->type = type; | ||
198 | } | ||
199 | |||
200 | this->length += amount; | ||
201 | next->offset += amount; | ||
202 | next->length -= amount; | ||
203 | if (!next->length) { | ||
204 | list_del(&next->nl_entry); | ||
205 | list_del(&next->fl_entry); | ||
206 | kfree(next); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | if (splitoff) { | ||
211 | if (!region_split(rmm, this, splitoff)) | ||
212 | return -ENOMEM; | ||
213 | } | ||
214 | |||
215 | this = region_split(rmm, this, alloc); | ||
216 | if (this == NULL) | ||
217 | return -ENOMEM; | ||
218 | |||
219 | this->free = false; | ||
220 | list_del(&this->fl_entry); | ||
221 | *pnode = this; | ||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | return -ENOMEM; | ||
226 | } | ||
227 | |||
228 | int | ||
229 | nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block) | ||
230 | { | ||
231 | struct nouveau_mm *rmm; | ||
232 | struct nouveau_mm_node *heap; | ||
233 | |||
234 | heap = kzalloc(sizeof(*heap), GFP_KERNEL); | ||
235 | if (!heap) | ||
236 | return -ENOMEM; | ||
237 | heap->free = true; | ||
238 | heap->offset = roundup(offset, block); | ||
239 | heap->length = rounddown(offset + length, block) - heap->offset; | ||
240 | |||
241 | rmm = kzalloc(sizeof(*rmm), GFP_KERNEL); | ||
242 | if (!rmm) { | ||
243 | kfree(heap); | ||
244 | return -ENOMEM; | ||
245 | } | ||
246 | rmm->block_size = block; | ||
247 | mutex_init(&rmm->mutex); | ||
248 | INIT_LIST_HEAD(&rmm->nodes); | ||
249 | INIT_LIST_HEAD(&rmm->free); | ||
250 | list_add(&heap->nl_entry, &rmm->nodes); | ||
251 | list_add(&heap->fl_entry, &rmm->free); | ||
252 | |||
253 | *prmm = rmm; | ||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | int | ||
258 | nouveau_mm_fini(struct nouveau_mm **prmm) | ||
259 | { | ||
260 | struct nouveau_mm *rmm = *prmm; | ||
261 | struct nouveau_mm_node *heap = | ||
262 | list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); | ||
263 | |||
264 | if (!list_is_singular(&rmm->nodes)) | ||
265 | return -EBUSY; | ||
266 | |||
267 | kfree(heap); | ||
268 | kfree(rmm); | ||
269 | *prmm = NULL; | ||
270 | return 0; | ||
271 | } | ||
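
nouveau_mm is a small typed range allocator: nouveau_mm_init() seeds the node list with one free region covering the block-aligned heap, nouveau_mm_get() carves a typed allocation out of free space (splitting for alignment and, when size_nc is non-zero, permitting a partial allocation in multiples of size_nc), and nouveau_mm_put() frees the node, merges it with free neighbours, and untypes any whole blocks that become entirely free. A hypothetical caller (the offsets, sizes and type value below are illustrative, not taken from the patch) would drive it like this:

	struct nouveau_mm *mm = NULL;
	struct nouveau_mm_node *node = NULL;
	int ret;

	/* heap spanning [0, 0x10000), managed in 0x100-unit blocks */
	ret = nouveau_mm_init(&mm, 0x0000, 0x10000, 0x100);
	if (ret)
		return ret;

	/* 0x400 units of memory type 1, power-of-two alignment 0x100,
	 * no partial allocations */
	ret = nouveau_mm_get(mm, 1, 0x400, 0, 0x100, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);	/* free and re-merge */

	return nouveau_mm_fini(&mm);		/* -EBUSY while nodes remain */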
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h new file mode 100644 index 000000000000..250e642de0a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #ifndef __NOUVEAU_REGION_H__ | ||
26 | #define __NOUVEAU_REGION_H__ | ||
27 | |||
28 | struct nouveau_mm_node { | ||
29 | struct list_head nl_entry; | ||
30 | struct list_head fl_entry; | ||
31 | struct list_head rl_entry; | ||
32 | |||
33 | bool free; | ||
34 | int type; | ||
35 | |||
36 | u32 offset; | ||
37 | u32 length; | ||
38 | }; | ||
39 | |||
40 | struct nouveau_mm { | ||
41 | struct list_head nodes; | ||
42 | struct list_head free; | ||
43 | |||
44 | struct mutex mutex; | ||
45 | |||
46 | u32 block_size; | ||
47 | }; | ||
48 | |||
49 | int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block); | ||
50 | int nouveau_mm_fini(struct nouveau_mm **); | ||
51 | int nouveau_mm_pre(struct nouveau_mm *); | ||
52 | int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, | ||
53 | u32 align, struct nouveau_mm_node **); | ||
54 | void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); | ||
55 | |||
56 | int nv50_vram_init(struct drm_device *); | ||
57 | int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, | ||
58 | u32 memtype, struct nouveau_vram **); | ||
59 | void nv50_vram_del(struct drm_device *, struct nouveau_vram **); | ||
60 | bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); | ||
61 | |||
62 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 2cc59f8c658b..a050b7b69782 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
99 | int size, uint32_t *b_offset) | 99 | int size, uint32_t *b_offset) |
100 | { | 100 | { |
101 | struct drm_device *dev = chan->dev; | 101 | struct drm_device *dev = chan->dev; |
102 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
103 | struct nouveau_gpuobj *nobj = NULL; | 102 | struct nouveau_gpuobj *nobj = NULL; |
104 | struct drm_mm_node *mem; | 103 | struct drm_mm_node *mem; |
105 | uint32_t offset; | 104 | uint32_t offset; |
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
113 | return -ENOMEM; | 112 | return -ENOMEM; |
114 | } | 113 | } |
115 | 114 | ||
116 | offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; | 115 | if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) |
117 | if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { | 116 | target = NV_MEM_TARGET_VRAM; |
118 | target = NV_DMA_TARGET_VIDMEM; | 117 | else |
119 | } else | 118 | target = NV_MEM_TARGET_GART; |
120 | if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { | 119 | offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; |
121 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && | ||
122 | dev_priv->card_type < NV_50) { | ||
123 | ret = nouveau_sgdma_get_page(dev, offset, &offset); | ||
124 | if (ret) | ||
125 | return ret; | ||
126 | target = NV_DMA_TARGET_PCI; | ||
127 | } else { | ||
128 | target = NV_DMA_TARGET_AGP; | ||
129 | if (dev_priv->card_type >= NV_50) | ||
130 | offset += dev_priv->vm_gart_base; | ||
131 | } | ||
132 | } else { | ||
133 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", | ||
134 | chan->notifier_bo->bo.mem.mem_type); | ||
135 | return -EINVAL; | ||
136 | } | ||
137 | offset += mem->start; | 120 | offset += mem->start; |
138 | 121 | ||
139 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, | 122 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, |
140 | mem->size, NV_DMA_ACCESS_RW, target, | 123 | mem->size, NV_MEM_ACCESS_RW, target, |
141 | &nobj); | 124 | &nobj); |
142 | if (ret) { | 125 | if (ret) { |
143 | drm_mm_put_block(mem); | 126 | drm_mm_put_block(mem); |
@@ -185,11 +168,11 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | |||
185 | struct nouveau_channel *chan; | 168 | struct nouveau_channel *chan; |
186 | int ret; | 169 | int ret; |
187 | 170 | ||
188 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); | 171 | chan = nouveau_channel_get(dev, file_priv, na->channel); |
172 | if (IS_ERR(chan)) | ||
173 | return PTR_ERR(chan); | ||
189 | 174 | ||
190 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); | 175 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); |
191 | if (ret) | 176 | nouveau_channel_put(&chan); |
192 | return ret; | 177 | return ret; |
193 | |||
194 | return 0; | ||
195 | } | 178 | } |
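
The rewritten ioctl shows the channel reference pattern this series moves to: nouveau_channel_get() returns an ERR_PTR-encoded channel that must always be balanced with nouveau_channel_put(). A minimal sketch of the pattern (the body is a placeholder):

	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, chid);
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* no such channel, or it is going away */

	ret = 0;	/* ... operate on chan while the reference is held ... */

	nouveau_channel_put(&chan);	/* drops the reference, NULLs the pointer */
	return ret;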
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index dd572adca02a..55c9fdcfa67f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -35,6 +35,102 @@ | |||
35 | #include "nouveau_drv.h" | 35 | #include "nouveau_drv.h" |
36 | #include "nouveau_drm.h" | 36 | #include "nouveau_drm.h" |
37 | #include "nouveau_ramht.h" | 37 | #include "nouveau_ramht.h" |
38 | #include "nouveau_vm.h" | ||
39 | |||
40 | struct nouveau_gpuobj_method { | ||
41 | struct list_head head; | ||
42 | u32 mthd; | ||
43 | int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data); | ||
44 | }; | ||
45 | |||
46 | struct nouveau_gpuobj_class { | ||
47 | struct list_head head; | ||
48 | struct list_head methods; | ||
49 | u32 id; | ||
50 | u32 engine; | ||
51 | }; | ||
52 | |||
53 | int | ||
54 | nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) | ||
55 | { | ||
56 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
57 | struct nouveau_gpuobj_class *oc; | ||
58 | |||
59 | oc = kzalloc(sizeof(*oc), GFP_KERNEL); | ||
60 | if (!oc) | ||
61 | return -ENOMEM; | ||
62 | |||
63 | INIT_LIST_HEAD(&oc->methods); | ||
64 | oc->id = class; | ||
65 | oc->engine = engine; | ||
66 | list_add(&oc->head, &dev_priv->classes); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | int | ||
71 | nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, | ||
72 | int (*exec)(struct nouveau_channel *, u32, u32, u32)) | ||
73 | { | ||
74 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
75 | struct nouveau_gpuobj_method *om; | ||
76 | struct nouveau_gpuobj_class *oc; | ||
77 | |||
78 | list_for_each_entry(oc, &dev_priv->classes, head) { | ||
79 | if (oc->id == class) | ||
80 | goto found; | ||
81 | } | ||
82 | |||
83 | return -EINVAL; | ||
84 | |||
85 | found: | ||
86 | om = kzalloc(sizeof(*om), GFP_KERNEL); | ||
87 | if (!om) | ||
88 | return -ENOMEM; | ||
89 | |||
90 | om->mthd = mthd; | ||
91 | om->exec = exec; | ||
92 | list_add(&om->head, &oc->methods); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | int | ||
97 | nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, | ||
98 | u32 class, u32 mthd, u32 data) | ||
99 | { | ||
100 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
101 | struct nouveau_gpuobj_method *om; | ||
102 | struct nouveau_gpuobj_class *oc; | ||
103 | |||
104 | list_for_each_entry(oc, &dev_priv->classes, head) { | ||
105 | if (oc->id != class) | ||
106 | continue; | ||
107 | |||
108 | list_for_each_entry(om, &oc->methods, head) { | ||
109 | if (om->mthd == mthd) | ||
110 | return om->exec(chan, class, mthd, data); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | return -ENOENT; | ||
115 | } | ||
116 | |||
117 | int | ||
118 | nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, | ||
119 | u32 class, u32 mthd, u32 data) | ||
120 | { | ||
121 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
122 | struct nouveau_channel *chan = NULL; | ||
123 | unsigned long flags; | ||
124 | int ret = -EINVAL; | ||
125 | |||
126 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
127 | if (chid > 0 && chid < dev_priv->engine.fifo.channels) | ||
128 | chan = dev_priv->channels.ptr[chid]; | ||
129 | if (chan) | ||
130 | ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); | ||
131 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
132 | return ret; | ||
133 | } | ||
38 | 134 | ||
39 | /* NVidia uses context objects to drive drawing operations. | 135 | /* NVidia uses context objects to drive drawing operations. |
40 | 136 | ||
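
These class/method tables replace the driver's old static grclass arrays: each engine registers the object classes it implements once at load time, and software methods are attached to a class afterwards, to be dispatched through nouveau_gpuobj_mthd_call() when one traps. A hypothetical registration (the class ID 0x506e and method offset 0x0500 are illustrative only):

	static int
	example_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
	{
		return 0;	/* method consumed */
	}

	static int
	example_register(struct drm_device *dev)
	{
		int ret;

		/* expose class 0x506e on the software engine */
		ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
		if (ret)
			return ret;

		/* route method 0x0500 of that class to example_mthd() */
		return nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, example_mthd);
	}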
@@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
73 | struct nouveau_gpuobj **gpuobj_ret) | 169 | struct nouveau_gpuobj **gpuobj_ret) |
74 | { | 170 | { |
75 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 171 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
76 | struct nouveau_engine *engine = &dev_priv->engine; | 172 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; |
77 | struct nouveau_gpuobj *gpuobj; | 173 | struct nouveau_gpuobj *gpuobj; |
78 | struct drm_mm_node *ramin = NULL; | 174 | struct drm_mm_node *ramin = NULL; |
79 | int ret; | 175 | int ret, i; |
80 | 176 | ||
81 | NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", | 177 | NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", |
82 | chan ? chan->id : -1, size, align, flags); | 178 | chan ? chan->id : -1, size, align, flags); |
83 | 179 | ||
84 | if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) | ||
85 | return -EINVAL; | ||
86 | |||
87 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | 180 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); |
88 | if (!gpuobj) | 181 | if (!gpuobj) |
89 | return -ENOMEM; | 182 | return -ENOMEM; |
@@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
98 | spin_unlock(&dev_priv->ramin_lock); | 191 | spin_unlock(&dev_priv->ramin_lock); |
99 | 192 | ||
100 | if (chan) { | 193 | if (chan) { |
101 | NV_DEBUG(dev, "channel heap\n"); | ||
102 | |||
103 | ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); | 194 | ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); |
104 | if (ramin) | 195 | if (ramin) |
105 | ramin = drm_mm_get_block(ramin, size, align); | 196 | ramin = drm_mm_get_block(ramin, size, align); |
106 | |||
107 | if (!ramin) { | 197 | if (!ramin) { |
108 | nouveau_gpuobj_ref(NULL, &gpuobj); | 198 | nouveau_gpuobj_ref(NULL, &gpuobj); |
109 | return -ENOMEM; | 199 | return -ENOMEM; |
110 | } | 200 | } |
111 | } else { | ||
112 | NV_DEBUG(dev, "global heap\n"); | ||
113 | |||
114 | /* allocate backing pages, sets vinst */ | ||
115 | ret = engine->instmem.populate(dev, gpuobj, &size); | ||
116 | if (ret) { | ||
117 | nouveau_gpuobj_ref(NULL, &gpuobj); | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /* try and get aperture space */ | ||
122 | do { | ||
123 | if (drm_mm_pre_get(&dev_priv->ramin_heap)) | ||
124 | return -ENOMEM; | ||
125 | 201 | ||
126 | spin_lock(&dev_priv->ramin_lock); | 202 | gpuobj->pinst = chan->ramin->pinst; |
127 | ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, | 203 | if (gpuobj->pinst != ~0) |
128 | align, 0); | 204 | gpuobj->pinst += ramin->start; |
129 | if (ramin == NULL) { | ||
130 | spin_unlock(&dev_priv->ramin_lock); | ||
131 | nouveau_gpuobj_ref(NULL, &gpuobj); | ||
132 | return -ENOMEM; | ||
133 | } | ||
134 | |||
135 | ramin = drm_mm_get_block_atomic(ramin, size, align); | ||
136 | spin_unlock(&dev_priv->ramin_lock); | ||
137 | } while (ramin == NULL); | ||
138 | |||
139 | /* on nv50 it's ok to fail, we have a fallback path */ | ||
140 | if (!ramin && dev_priv->card_type < NV_50) { | ||
141 | nouveau_gpuobj_ref(NULL, &gpuobj); | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | } | ||
145 | 205 | ||
146 | /* if we got a chunk of the aperture, map pages into it */ | 206 | gpuobj->cinst = ramin->start; |
147 | gpuobj->im_pramin = ramin; | 207 | gpuobj->vinst = ramin->start + chan->ramin->vinst; |
148 | if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { | 208 | gpuobj->node = ramin; |
149 | ret = engine->instmem.bind(dev, gpuobj); | 209 | } else { |
210 | ret = instmem->get(gpuobj, size, align); | ||
150 | if (ret) { | 211 | if (ret) { |
151 | nouveau_gpuobj_ref(NULL, &gpuobj); | 212 | nouveau_gpuobj_ref(NULL, &gpuobj); |
152 | return ret; | 213 | return ret; |
153 | } | 214 | } |
154 | } | ||
155 | 215 | ||
156 | /* calculate the various different addresses for the object */ | 216 | ret = -ENOSYS; |
157 | if (chan) { | 217 | if (!(flags & NVOBJ_FLAG_DONT_MAP)) |
158 | gpuobj->pinst = chan->ramin->pinst; | 218 | ret = instmem->map(gpuobj); |
159 | if (gpuobj->pinst != ~0) | 219 | if (ret) |
160 | gpuobj->pinst += gpuobj->im_pramin->start; | ||
161 | |||
162 | if (dev_priv->card_type < NV_50) { | ||
163 | gpuobj->cinst = gpuobj->pinst; | ||
164 | } else { | ||
165 | gpuobj->cinst = gpuobj->im_pramin->start; | ||
166 | gpuobj->vinst = gpuobj->im_pramin->start + | ||
167 | chan->ramin->vinst; | ||
168 | } | ||
169 | } else { | ||
170 | if (gpuobj->im_pramin) | ||
171 | gpuobj->pinst = gpuobj->im_pramin->start; | ||
172 | else | ||
173 | gpuobj->pinst = ~0; | 220 | gpuobj->pinst = ~0; |
174 | gpuobj->cinst = 0xdeadbeef; | 221 | |
222 | gpuobj->cinst = NVOBJ_CINST_GLOBAL; | ||
175 | } | 223 | } |
176 | 224 | ||
177 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { | 225 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { |
178 | int i; | ||
179 | |||
180 | for (i = 0; i < gpuobj->size; i += 4) | 226 | for (i = 0; i < gpuobj->size; i += 4) |
181 | nv_wo32(gpuobj, i, 0); | 227 | nv_wo32(gpuobj, i, 0); |
182 | engine->instmem.flush(dev); | 228 | instmem->flush(dev); |
183 | } | 229 | } |
184 | 230 | ||
185 | 231 | ||
@@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev) | |||
195 | NV_DEBUG(dev, "\n"); | 241 | NV_DEBUG(dev, "\n"); |
196 | 242 | ||
197 | INIT_LIST_HEAD(&dev_priv->gpuobj_list); | 243 | INIT_LIST_HEAD(&dev_priv->gpuobj_list); |
244 | INIT_LIST_HEAD(&dev_priv->classes); | ||
198 | spin_lock_init(&dev_priv->ramin_lock); | 245 | spin_lock_init(&dev_priv->ramin_lock); |
199 | dev_priv->ramin_base = ~0; | 246 | dev_priv->ramin_base = ~0; |
200 | 247 | ||
@@ -205,9 +252,20 @@ void | |||
205 | nouveau_gpuobj_takedown(struct drm_device *dev) | 252 | nouveau_gpuobj_takedown(struct drm_device *dev) |
206 | { | 253 | { |
207 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 254 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
255 | struct nouveau_gpuobj_method *om, *tm; | ||
256 | struct nouveau_gpuobj_class *oc, *tc; | ||
208 | 257 | ||
209 | NV_DEBUG(dev, "\n"); | 258 | NV_DEBUG(dev, "\n"); |
210 | 259 | ||
260 | list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) { | ||
261 | list_for_each_entry_safe(om, tm, &oc->methods, head) { | ||
262 | list_del(&om->head); | ||
263 | kfree(om); | ||
264 | } | ||
265 | list_del(&oc->head); | ||
266 | kfree(oc); | ||
267 | } | ||
268 | |||
211 | BUG_ON(!list_empty(&dev_priv->gpuobj_list)); | 269 | BUG_ON(!list_empty(&dev_priv->gpuobj_list)); |
212 | } | 270 | } |
213 | 271 | ||
@@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref) | |||
219 | container_of(ref, struct nouveau_gpuobj, refcount); | 277 | container_of(ref, struct nouveau_gpuobj, refcount); |
220 | struct drm_device *dev = gpuobj->dev; | 278 | struct drm_device *dev = gpuobj->dev; |
221 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 279 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
222 | struct nouveau_engine *engine = &dev_priv->engine; | 280 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; |
223 | int i; | 281 | int i; |
224 | 282 | ||
225 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | 283 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
226 | 284 | ||
227 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { | 285 | if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { |
228 | for (i = 0; i < gpuobj->size; i += 4) | 286 | for (i = 0; i < gpuobj->size; i += 4) |
229 | nv_wo32(gpuobj, i, 0); | 287 | nv_wo32(gpuobj, i, 0); |
230 | engine->instmem.flush(dev); | 288 | instmem->flush(dev); |
231 | } | 289 | } |
232 | 290 | ||
233 | if (gpuobj->dtor) | 291 | if (gpuobj->dtor) |
234 | gpuobj->dtor(dev, gpuobj); | 292 | gpuobj->dtor(dev, gpuobj); |
235 | 293 | ||
236 | if (gpuobj->im_backing) | 294 | if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { |
237 | engine->instmem.clear(dev, gpuobj); | 295 | if (gpuobj->node) { |
296 | instmem->unmap(gpuobj); | ||
297 | instmem->put(gpuobj); | ||
298 | } | ||
299 | } else { | ||
300 | if (gpuobj->node) { | ||
301 | spin_lock(&dev_priv->ramin_lock); | ||
302 | drm_mm_put_block(gpuobj->node); | ||
303 | spin_unlock(&dev_priv->ramin_lock); | ||
304 | } | ||
305 | } | ||
238 | 306 | ||
239 | spin_lock(&dev_priv->ramin_lock); | 307 | spin_lock(&dev_priv->ramin_lock); |
240 | if (gpuobj->im_pramin) | ||
241 | drm_mm_put_block(gpuobj->im_pramin); | ||
242 | list_del(&gpuobj->list); | 308 | list_del(&gpuobj->list); |
243 | spin_unlock(&dev_priv->ramin_lock); | 309 | spin_unlock(&dev_priv->ramin_lock); |
244 | 310 | ||
@@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, | |||
278 | kref_init(&gpuobj->refcount); | 344 | kref_init(&gpuobj->refcount); |
279 | gpuobj->size = size; | 345 | gpuobj->size = size; |
280 | gpuobj->pinst = pinst; | 346 | gpuobj->pinst = pinst; |
281 | gpuobj->cinst = 0xdeadbeef; | 347 | gpuobj->cinst = NVOBJ_CINST_GLOBAL; |
282 | gpuobj->vinst = vinst; | 348 | gpuobj->vinst = vinst; |
283 | 349 | ||
284 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { | 350 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { |
@@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) | |||
335 | The method below creates a DMA object in instance RAM and returns a handle | 401 | The method below creates a DMA object in instance RAM and returns a handle |
336 | to it that can be used to set up context objects. | 402 | to it that can be used to set up context objects. |
337 | */ | 403 | */ |
338 | int | 404 | |
339 | nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, | 405 | void |
340 | uint64_t offset, uint64_t size, int access, | 406 | nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, |
341 | int target, struct nouveau_gpuobj **gpuobj) | 407 | u64 base, u64 size, int target, int access, |
408 | u32 type, u32 comp) | ||
342 | { | 409 | { |
343 | struct drm_device *dev = chan->dev; | 410 | struct drm_nouveau_private *dev_priv = obj->dev->dev_private; |
344 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 411 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
345 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | 412 | u32 flags0; |
346 | int ret; | ||
347 | 413 | ||
348 | NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", | 414 | flags0 = (comp << 29) | (type << 22) | class; |
349 | chan->id, class, offset, size); | 415 | flags0 |= 0x00100000; |
350 | NV_DEBUG(dev, "access=%d target=%d\n", access, target); | 416 | |
417 | switch (access) { | ||
418 | case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; | ||
419 | case NV_MEM_ACCESS_RW: | ||
420 | case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break; | ||
421 | default: | ||
422 | break; | ||
423 | } | ||
351 | 424 | ||
352 | switch (target) { | 425 | switch (target) { |
353 | case NV_DMA_TARGET_AGP: | 426 | case NV_MEM_TARGET_VRAM: |
354 | offset += dev_priv->gart_info.aper_base; | 427 | flags0 |= 0x00010000; |
428 | break; | ||
429 | case NV_MEM_TARGET_PCI: | ||
430 | flags0 |= 0x00020000; | ||
431 | break; | ||
432 | case NV_MEM_TARGET_PCI_NOSNOOP: | ||
433 | flags0 |= 0x00030000; | ||
355 | break; | 434 | break; |
435 | case NV_MEM_TARGET_GART: | ||
436 | base += dev_priv->gart_info.aper_base; | ||
356 | default: | 437 | default: |
438 | flags0 &= ~0x00100000; | ||
357 | break; | 439 | break; |
358 | } | 440 | } |
359 | 441 | ||
360 | ret = nouveau_gpuobj_new(dev, chan, | 442 | /* convert to base + limit */ |
361 | nouveau_gpuobj_class_instmem_size(dev, class), | 443 | size = (base + size) - 1; |
362 | 16, NVOBJ_FLAG_ZERO_ALLOC | | ||
363 | NVOBJ_FLAG_ZERO_FREE, gpuobj); | ||
364 | if (ret) { | ||
365 | NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); | ||
366 | return ret; | ||
367 | } | ||
368 | 444 | ||
369 | if (dev_priv->card_type < NV_50) { | 445 | nv_wo32(obj, offset + 0x00, flags0); |
370 | uint32_t frame, adjust, pte_flags = 0; | 446 | nv_wo32(obj, offset + 0x04, lower_32_bits(size)); |
371 | 447 | nv_wo32(obj, offset + 0x08, lower_32_bits(base)); | |
372 | if (access != NV_DMA_ACCESS_RO) | 448 | nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 | |
373 | pte_flags |= (1<<1); | 449 | upper_32_bits(base)); |
374 | adjust = offset & 0x00000fff; | 450 | nv_wo32(obj, offset + 0x10, 0x00000000); |
375 | frame = offset & ~0x00000fff; | 451 | nv_wo32(obj, offset + 0x14, 0x00000000); |
376 | |||
377 | nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) | | ||
378 | (access << 14) | (target << 16) | | ||
379 | class)); | ||
380 | nv_wo32(*gpuobj, 4, size - 1); | ||
381 | nv_wo32(*gpuobj, 8, frame | pte_flags); | ||
382 | nv_wo32(*gpuobj, 12, frame | pte_flags); | ||
383 | } else { | ||
384 | uint64_t limit = offset + size - 1; | ||
385 | uint32_t flags0, flags5; | ||
386 | 452 | ||
387 | if (target == NV_DMA_TARGET_VIDMEM) { | 453 | pinstmem->flush(obj->dev); |
388 | flags0 = 0x00190000; | 454 | } |
389 | flags5 = 0x00010000; | ||
390 | } else { | ||
391 | flags0 = 0x7fc00000; | ||
392 | flags5 = 0x00080000; | ||
393 | } | ||
394 | 455 | ||
395 | nv_wo32(*gpuobj, 0, flags0 | class); | 456 | int |
396 | nv_wo32(*gpuobj, 4, lower_32_bits(limit)); | 457 | nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, |
397 | nv_wo32(*gpuobj, 8, lower_32_bits(offset)); | 458 | int target, int access, u32 type, u32 comp, |
398 | nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | | 459 | struct nouveau_gpuobj **pobj) |
399 | (upper_32_bits(offset) & 0xff)); | 460 | { |
400 | nv_wo32(*gpuobj, 20, flags5); | 461 | struct drm_device *dev = chan->dev; |
401 | } | 462 | int ret; |
402 | 463 | ||
403 | instmem->flush(dev); | 464 | ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj); |
465 | if (ret) | ||
466 | return ret; | ||
404 | 467 | ||
405 | (*gpuobj)->engine = NVOBJ_ENGINE_SW; | 468 | nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target, |
406 | (*gpuobj)->class = class; | 469 | access, type, comp); |
407 | return 0; | 470 | return 0; |
408 | } | 471 | } |
409 | 472 | ||
410 | int | 473 | int |
411 | nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, | 474 | nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, |
412 | uint64_t offset, uint64_t size, int access, | 475 | u64 size, int access, int target, |
413 | struct nouveau_gpuobj **gpuobj, | 476 | struct nouveau_gpuobj **pobj) |
414 | uint32_t *o_ret) | ||
415 | { | 477 | { |
478 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
416 | struct drm_device *dev = chan->dev; | 479 | struct drm_device *dev = chan->dev; |
417 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 480 | struct nouveau_gpuobj *obj; |
481 | u32 flags0, flags2; | ||
418 | int ret; | 482 | int ret; |
419 | 483 | ||
420 | if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || | 484 | if (dev_priv->card_type >= NV_50) { |
421 | (dev_priv->card_type >= NV_50 && | 485 | u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0; |
422 | dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { | 486 | u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0; |
423 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 487 | |
424 | offset + dev_priv->vm_gart_base, | 488 | return nv50_gpuobj_dma_new(chan, class, base, size, |
425 | size, access, NV_DMA_TARGET_AGP, | 489 | target, access, type, comp, pobj); |
426 | gpuobj); | 490 | } |
427 | if (o_ret) | 491 | |
428 | *o_ret = 0; | 492 | if (target == NV_MEM_TARGET_GART) { |
429 | } else | 493 | if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { |
430 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { | 494 | target = NV_MEM_TARGET_PCI_NOSNOOP; |
431 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); | 495 | base += dev_priv->gart_info.aper_base; |
432 | if (offset & ~0xffffffffULL) { | 496 | } else |
433 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); | 497 | if (base != 0) { |
434 | return -EINVAL; | 498 | base = nouveau_sgdma_get_physical(dev, base); |
499 | target = NV_MEM_TARGET_PCI; | ||
500 | } else { | ||
501 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj); | ||
502 | return 0; | ||
435 | } | 503 | } |
436 | if (o_ret) | ||
437 | *o_ret = (uint32_t)offset; | ||
438 | ret = (*gpuobj != NULL) ? 0 : -EINVAL; | ||
439 | } else { | ||
440 | NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); | ||
441 | return -EINVAL; | ||
442 | } | 504 | } |
443 | 505 | ||
444 | return ret; | 506 | flags0 = class; |
507 | flags0 |= 0x00003000; /* PT present, PT linear */ | ||
508 | flags2 = 0; | ||
509 | |||
510 | switch (target) { | ||
511 | case NV_MEM_TARGET_PCI: | ||
512 | flags0 |= 0x00020000; | ||
513 | break; | ||
514 | case NV_MEM_TARGET_PCI_NOSNOOP: | ||
515 | flags0 |= 0x00030000; | ||
516 | break; | ||
517 | default: | ||
518 | break; | ||
519 | } | ||
520 | |||
521 | switch (access) { | ||
522 | case NV_MEM_ACCESS_RO: | ||
523 | flags0 |= 0x00004000; | ||
524 | break; | ||
525 | case NV_MEM_ACCESS_WO: | ||
526 | flags0 |= 0x00008000; | ||
527 | default: | ||
528 | flags2 |= 0x00000002; | ||
529 | break; | ||
530 | } | ||
531 | |||
532 | flags0 |= (base & 0x00000fff) << 20; | ||
533 | flags2 |= (base & 0xfffff000); | ||
534 | |||
535 | ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); | ||
536 | if (ret) | ||
537 | return ret; | ||
538 | |||
539 | nv_wo32(obj, 0x00, flags0); | ||
540 | nv_wo32(obj, 0x04, size - 1); | ||
541 | nv_wo32(obj, 0x08, flags2); | ||
542 | nv_wo32(obj, 0x0c, flags2); | ||
543 | |||
544 | obj->engine = NVOBJ_ENGINE_SW; | ||
545 | obj->class = class; | ||
546 | *pobj = obj; | ||
547 | return 0; | ||
445 | } | 548 | } |
446 | 549 | ||
447 | /* Context objects in the instance RAM have the following structure. | 550 | /* Context objects in the instance RAM have the following structure. |
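
nv50_gpuobj_dma_init() above writes a six-word DMA descriptor: flags0 packs the compression, page-type, access and target bits around the class, and the base/limit pair is stored as split 64-bit fields. Hand-working one case from the code: a read/write VRAM object of class 0x003d with base 0 and size 1MiB gives

	flags0 = (0 << 29) | (0 << 22) | 0x003d	/* comp, type, class  */
	       | 0x00100000			/* linear/valid bit   */
	       | 0x00080000			/* NV_MEM_ACCESS_RW   */
	       | 0x00010000;			/* NV_MEM_TARGET_VRAM */
	/* = 0x0019003d, matching the 0x00190000 value the old code
	 * hardcoded for the VIDMEM target */

	/* size becomes an inclusive limit: (0 + 0x100000) - 1 */
	nv_wo32(obj, 0x00, 0x0019003d);
	nv_wo32(obj, 0x04, 0x000fffff);	/* limit, low 32 bits */
	nv_wo32(obj, 0x08, 0x00000000);	/* base, low 32 bits  */
	nv_wo32(obj, 0x0c, 0x00000000);	/* upper bits of both */
	nv_wo32(obj, 0x10, 0x00000000);
	nv_wo32(obj, 0x14, 0x00000000);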
@@ -495,82 +598,122 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, | |||
495 | entry[5]: | 598 | entry[5]: |
496 | set to 0? | 599 | set to 0? |
497 | */ | 600 | */ |
601 | static int | ||
602 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | ||
603 | struct nouveau_gpuobj **gpuobj_ret) | ||
604 | { | ||
605 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
606 | struct nouveau_gpuobj *gpuobj; | ||
607 | |||
608 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | ||
609 | if (!gpuobj) | ||
610 | return -ENOMEM; | ||
611 | gpuobj->dev = chan->dev; | ||
612 | gpuobj->engine = NVOBJ_ENGINE_SW; | ||
613 | gpuobj->class = class; | ||
614 | kref_init(&gpuobj->refcount); | ||
615 | gpuobj->cinst = 0x40; | ||
616 | |||
617 | spin_lock(&dev_priv->ramin_lock); | ||
618 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | ||
619 | spin_unlock(&dev_priv->ramin_lock); | ||
620 | *gpuobj_ret = gpuobj; | ||
621 | return 0; | ||
622 | } | ||
623 | |||
498 | int | 624 | int |
499 | nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, | 625 | nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) |
500 | struct nouveau_gpuobj **gpuobj) | ||
501 | { | 626 | { |
627 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
502 | struct drm_device *dev = chan->dev; | 628 | struct drm_device *dev = chan->dev; |
503 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 629 | struct nouveau_gpuobj_class *oc; |
630 | struct nouveau_gpuobj *gpuobj; | ||
504 | int ret; | 631 | int ret; |
505 | 632 | ||
506 | NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); | 633 | NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); |
507 | 634 | ||
635 | list_for_each_entry(oc, &dev_priv->classes, head) { | ||
636 | if (oc->id == class) | ||
637 | goto found; | ||
638 | } | ||
639 | |||
640 | NV_ERROR(dev, "illegal object class: 0x%x\n", class); | ||
641 | return -EINVAL; | ||
642 | |||
643 | found: | ||
644 | switch (oc->engine) { | ||
645 | case NVOBJ_ENGINE_SW: | ||
646 | ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj); | ||
647 | if (ret) | ||
648 | return ret; | ||
649 | goto insert; | ||
650 | case NVOBJ_ENGINE_GR: | ||
651 | if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { | ||
652 | struct nouveau_pgraph_engine *pgraph = | ||
653 | &dev_priv->engine.graph; | ||
654 | |||
655 | ret = pgraph->create_context(chan); | ||
656 | if (ret) | ||
657 | return ret; | ||
658 | } | ||
659 | break; | ||
660 | case NVOBJ_ENGINE_CRYPT: | ||
661 | if (!chan->crypt_ctx) { | ||
662 | struct nouveau_crypt_engine *pcrypt = | ||
663 | &dev_priv->engine.crypt; | ||
664 | |||
665 | ret = pcrypt->create_context(chan); | ||
666 | if (ret) | ||
667 | return ret; | ||
668 | } | ||
669 | break; | ||
670 | } | ||
671 | |||
508 | ret = nouveau_gpuobj_new(dev, chan, | 672 | ret = nouveau_gpuobj_new(dev, chan, |
509 | nouveau_gpuobj_class_instmem_size(dev, class), | 673 | nouveau_gpuobj_class_instmem_size(dev, class), |
510 | 16, | 674 | 16, |
511 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, | 675 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, |
512 | gpuobj); | 676 | &gpuobj); |
513 | if (ret) { | 677 | if (ret) { |
514 | NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); | 678 | NV_ERROR(dev, "error creating gpuobj: %d\n", ret); |
515 | return ret; | 679 | return ret; |
516 | } | 680 | } |
517 | 681 | ||
518 | if (dev_priv->card_type >= NV_50) { | 682 | if (dev_priv->card_type >= NV_50) { |
519 | nv_wo32(*gpuobj, 0, class); | 683 | nv_wo32(gpuobj, 0, class); |
520 | nv_wo32(*gpuobj, 20, 0x00010000); | 684 | nv_wo32(gpuobj, 20, 0x00010000); |
521 | } else { | 685 | } else { |
522 | switch (class) { | 686 | switch (class) { |
523 | case NV_CLASS_NULL: | 687 | case NV_CLASS_NULL: |
524 | nv_wo32(*gpuobj, 0, 0x00001030); | 688 | nv_wo32(gpuobj, 0, 0x00001030); |
525 | nv_wo32(*gpuobj, 4, 0xFFFFFFFF); | 689 | nv_wo32(gpuobj, 4, 0xFFFFFFFF); |
526 | break; | 690 | break; |
527 | default: | 691 | default: |
528 | if (dev_priv->card_type >= NV_40) { | 692 | if (dev_priv->card_type >= NV_40) { |
529 | nv_wo32(*gpuobj, 0, class); | 693 | nv_wo32(gpuobj, 0, class); |
530 | #ifdef __BIG_ENDIAN | 694 | #ifdef __BIG_ENDIAN |
531 | nv_wo32(*gpuobj, 8, 0x01000000); | 695 | nv_wo32(gpuobj, 8, 0x01000000); |
532 | #endif | 696 | #endif |
533 | } else { | 697 | } else { |
534 | #ifdef __BIG_ENDIAN | 698 | #ifdef __BIG_ENDIAN |
535 | nv_wo32(*gpuobj, 0, class | 0x00080000); | 699 | nv_wo32(gpuobj, 0, class | 0x00080000); |
536 | #else | 700 | #else |
537 | nv_wo32(*gpuobj, 0, class); | 701 | nv_wo32(gpuobj, 0, class); |
538 | #endif | 702 | #endif |
539 | } | 703 | } |
540 | } | 704 | } |
541 | } | 705 | } |
542 | dev_priv->engine.instmem.flush(dev); | 706 | dev_priv->engine.instmem.flush(dev); |
543 | 707 | ||
544 | (*gpuobj)->engine = NVOBJ_ENGINE_GR; | 708 | gpuobj->engine = oc->engine; |
545 | (*gpuobj)->class = class; | 709 | gpuobj->class = oc->id; |
546 | return 0; | ||
547 | } | ||
548 | |||
549 | int | ||
550 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | ||
551 | struct nouveau_gpuobj **gpuobj_ret) | ||
552 | { | ||
553 | struct drm_nouveau_private *dev_priv; | ||
554 | struct nouveau_gpuobj *gpuobj; | ||
555 | |||
556 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) | ||
557 | return -EINVAL; | ||
558 | dev_priv = chan->dev->dev_private; | ||
559 | |||
560 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | ||
561 | if (!gpuobj) | ||
562 | return -ENOMEM; | ||
563 | gpuobj->dev = chan->dev; | ||
564 | gpuobj->engine = NVOBJ_ENGINE_SW; | ||
565 | gpuobj->class = class; | ||
566 | kref_init(&gpuobj->refcount); | ||
567 | gpuobj->cinst = 0x40; | ||
568 | 710 | ||
569 | spin_lock(&dev_priv->ramin_lock); | 711 | insert: |
570 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 712 | ret = nouveau_ramht_insert(chan, handle, gpuobj); |
571 | spin_unlock(&dev_priv->ramin_lock); | 713 | if (ret) |
572 | *gpuobj_ret = gpuobj; | 714 | NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret); |
573 | return 0; | 715 | nouveau_gpuobj_ref(NULL, &gpuobj); |
716 | return ret; | ||
574 | } | 717 | } |
575 | 718 | ||
576 | static int | 719 | static int |
@@ -585,7 +728,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
585 | NV_DEBUG(dev, "ch%d\n", chan->id); | 728 | NV_DEBUG(dev, "ch%d\n", chan->id); |
586 | 729 | ||
587 | /* Base amount for object storage (4KiB enough?) */ | 730 | /* Base amount for object storage (4KiB enough?) */ |
588 | size = 0x1000; | 731 | size = 0x2000; |
589 | base = 0; | 732 | base = 0; |
590 | 733 | ||
591 | /* PGRAPH context */ | 734 | /* PGRAPH context */ |
@@ -624,9 +767,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
624 | { | 767 | { |
625 | struct drm_device *dev = chan->dev; | 768 | struct drm_device *dev = chan->dev; |
626 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 769 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
627 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
628 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; | 770 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; |
629 | int ret, i; | 771 | int ret; |
630 | 772 | ||
631 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | 773 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); |
632 | 774 | ||
@@ -637,16 +779,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
637 | return ret; | 779 | return ret; |
638 | } | 780 | } |
639 | 781 | ||
640 | /* NV50 VM | 782 | /* NV50/NVC0 VM |
641 | * - Allocate per-channel page-directory | 783 | * - Allocate per-channel page-directory |
642 | * - Map GART and VRAM into the channel's address space at the | 784 | * - Link with shared channel VM |
643 | * locations determined during init. | ||
644 | */ | 785 | */ |
645 | if (dev_priv->card_type >= NV_50) { | 786 | if (dev_priv->chan_vm) { |
646 | u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; | 787 | u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; |
647 | u64 vm_vinst = chan->ramin->vinst + pgd_offs; | 788 | u64 vm_vinst = chan->ramin->vinst + pgd_offs; |
648 | u32 vm_pinst = chan->ramin->pinst; | 789 | u32 vm_pinst = chan->ramin->pinst; |
649 | u32 pde; | ||
650 | 790 | ||
651 | if (vm_pinst != ~0) | 791 | if (vm_pinst != ~0) |
652 | vm_pinst += pgd_offs; | 792 | vm_pinst += pgd_offs; |
@@ -655,29 +795,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
655 | 0, &chan->vm_pd); | 795 | 0, &chan->vm_pd); |
656 | if (ret) | 796 | if (ret) |
657 | return ret; | 797 | return ret; |
658 | for (i = 0; i < 0x4000; i += 8) { | ||
659 | nv_wo32(chan->vm_pd, i + 0, 0x00000000); | ||
660 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); | ||
661 | } | ||
662 | |||
663 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, | ||
664 | &chan->vm_gart_pt); | ||
665 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; | ||
666 | nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); | ||
667 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); | ||
668 | |||
669 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; | ||
670 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | ||
671 | nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], | ||
672 | &chan->vm_vram_pt[i]); | ||
673 | |||
674 | nv_wo32(chan->vm_pd, pde + 0, | ||
675 | chan->vm_vram_pt[i]->vinst | 0x61); | ||
676 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); | ||
677 | pde += 8; | ||
678 | } | ||
679 | 798 | ||
680 | instmem->flush(dev); | 799 | nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); |
681 | } | 800 | } |
682 | 801 | ||
683 | /* RAMHT */ | 802 | /* RAMHT */ |
@@ -700,9 +819,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
700 | /* VRAM ctxdma */ | 819 | /* VRAM ctxdma */ |
701 | if (dev_priv->card_type >= NV_50) { | 820 | if (dev_priv->card_type >= NV_50) { |
702 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 821 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
703 | 0, dev_priv->vm_end, | 822 | 0, (1ULL << 40), NV_MEM_ACCESS_RW, |
704 | NV_DMA_ACCESS_RW, | 823 | NV_MEM_TARGET_VM, &vram); |
705 | NV_DMA_TARGET_AGP, &vram); | ||
706 | if (ret) { | 824 | if (ret) { |
707 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | 825 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); |
708 | return ret; | 826 | return ret; |
@@ -710,8 +828,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
710 | } else { | 828 | } else { |
711 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 829 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
712 | 0, dev_priv->fb_available_size, | 830 | 0, dev_priv->fb_available_size, |
713 | NV_DMA_ACCESS_RW, | 831 | NV_MEM_ACCESS_RW, |
714 | NV_DMA_TARGET_VIDMEM, &vram); | 832 | NV_MEM_TARGET_VRAM, &vram); |
715 | if (ret) { | 833 | if (ret) { |
716 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | 834 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); |
717 | return ret; | 835 | return ret; |
@@ -728,21 +846,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
728 | /* TT memory ctxdma */ | 846 | /* TT memory ctxdma */ |
729 | if (dev_priv->card_type >= NV_50) { | 847 | if (dev_priv->card_type >= NV_50) { |
730 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 848 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
731 | 0, dev_priv->vm_end, | 849 | 0, (1ULL << 40), NV_MEM_ACCESS_RW, |
732 | NV_DMA_ACCESS_RW, | 850 | NV_MEM_TARGET_VM, &tt); |
733 | NV_DMA_TARGET_AGP, &tt); | ||
734 | if (ret) { | ||
735 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | ||
736 | return ret; | ||
737 | } | ||
738 | } else | ||
739 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { | ||
740 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, | ||
741 | dev_priv->gart_info.aper_size, | ||
742 | NV_DMA_ACCESS_RW, &tt, NULL); | ||
743 | } else { | 851 | } else { |
744 | NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); | 852 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
745 | ret = -EINVAL; | 853 | 0, dev_priv->gart_info.aper_size, |
854 | NV_MEM_ACCESS_RW, | ||
855 | NV_MEM_TARGET_GART, &tt); | ||
746 | } | 856 | } |
747 | 857 | ||
748 | if (ret) { | 858 | if (ret) { |
@@ -763,9 +873,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
763 | void | 873 | void |
764 | nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | 874 | nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) |
765 | { | 875 | { |
766 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
767 | struct drm_device *dev = chan->dev; | 876 | struct drm_device *dev = chan->dev; |
768 | int i; | ||
769 | 877 | ||
770 | NV_DEBUG(dev, "ch%d\n", chan->id); | 878 | NV_DEBUG(dev, "ch%d\n", chan->id); |
771 | 879 | ||
@@ -774,10 +882,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | |||
774 | 882 | ||
775 | nouveau_ramht_ref(NULL, &chan->ramht, chan); | 883 | nouveau_ramht_ref(NULL, &chan->ramht, chan); |
776 | 884 | ||
885 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | ||
777 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | 886 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
778 | nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); | ||
779 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) | ||
780 | nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); | ||
781 | 887 | ||
782 | if (chan->ramin_heap.free_stack.next) | 888 | if (chan->ramin_heap.free_stack.next) |
783 | drm_mm_takedown(&chan->ramin_heap); | 889 | drm_mm_takedown(&chan->ramin_heap); |
@@ -791,147 +897,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev) | |||
791 | struct nouveau_gpuobj *gpuobj; | 897 | struct nouveau_gpuobj *gpuobj; |
792 | int i; | 898 | int i; |
793 | 899 | ||
794 | if (dev_priv->card_type < NV_50) { | ||
795 | dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); | ||
796 | if (!dev_priv->susres.ramin_copy) | ||
797 | return -ENOMEM; | ||
798 | |||
799 | for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) | ||
800 | dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); | ||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | 900 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { |
805 | if (!gpuobj->im_backing) | 901 | if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) |
806 | continue; | 902 | continue; |
807 | 903 | ||
808 | gpuobj->im_backing_suspend = vmalloc(gpuobj->size); | 904 | gpuobj->suspend = vmalloc(gpuobj->size); |
809 | if (!gpuobj->im_backing_suspend) { | 905 | if (!gpuobj->suspend) { |
810 | nouveau_gpuobj_resume(dev); | 906 | nouveau_gpuobj_resume(dev); |
811 | return -ENOMEM; | 907 | return -ENOMEM; |
812 | } | 908 | } |
813 | 909 | ||
814 | for (i = 0; i < gpuobj->size; i += 4) | 910 | for (i = 0; i < gpuobj->size; i += 4) |
815 | gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); | 911 | gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); |
816 | } | 912 | } |
817 | 913 | ||
818 | return 0; | 914 | return 0; |
819 | } | 915 | } |
820 | 916 | ||
821 | void | 917 | void |
822 | nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) | ||
823 | { | ||
824 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
825 | struct nouveau_gpuobj *gpuobj; | ||
826 | |||
827 | if (dev_priv->card_type < NV_50) { | ||
828 | vfree(dev_priv->susres.ramin_copy); | ||
829 | dev_priv->susres.ramin_copy = NULL; | ||
830 | return; | ||
831 | } | ||
832 | |||
833 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | ||
834 | if (!gpuobj->im_backing_suspend) | ||
835 | continue; | ||
836 | |||
837 | vfree(gpuobj->im_backing_suspend); | ||
838 | gpuobj->im_backing_suspend = NULL; | ||
839 | } | ||
840 | } | ||
841 | |||
842 | void | ||
843 | nouveau_gpuobj_resume(struct drm_device *dev) | 918 | nouveau_gpuobj_resume(struct drm_device *dev) |
844 | { | 919 | { |
845 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 920 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
846 | struct nouveau_gpuobj *gpuobj; | 921 | struct nouveau_gpuobj *gpuobj; |
847 | int i; | 922 | int i; |
848 | 923 | ||
849 | if (dev_priv->card_type < NV_50) { | ||
850 | for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) | ||
851 | nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); | ||
852 | nouveau_gpuobj_suspend_cleanup(dev); | ||
853 | return; | ||
854 | } | ||
855 | |||
856 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | 924 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { |
857 | if (!gpuobj->im_backing_suspend) | 925 | if (!gpuobj->suspend) |
858 | continue; | 926 | continue; |
859 | 927 | ||
860 | for (i = 0; i < gpuobj->size; i += 4) | 928 | for (i = 0; i < gpuobj->size; i += 4) |
861 | nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); | 929 | nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); |
862 | dev_priv->engine.instmem.flush(dev); | 930 | |
931 | vfree(gpuobj->suspend); | ||
932 | gpuobj->suspend = NULL; | ||
863 | } | 933 | } |
864 | 934 | ||
865 | nouveau_gpuobj_suspend_cleanup(dev); | 935 | dev_priv->engine.instmem.flush(dev); |
866 | } | 936 | } |
867 | 937 | ||
868 | int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | 938 | int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, |
869 | struct drm_file *file_priv) | 939 | struct drm_file *file_priv) |
870 | { | 940 | { |
871 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
872 | struct drm_nouveau_grobj_alloc *init = data; | 941 | struct drm_nouveau_grobj_alloc *init = data; |
873 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
874 | struct nouveau_pgraph_object_class *grc; | ||
875 | struct nouveau_gpuobj *gr = NULL; | ||
876 | struct nouveau_channel *chan; | 942 | struct nouveau_channel *chan; |
877 | int ret; | 943 | int ret; |
878 | 944 | ||
879 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); | ||
880 | |||
881 | if (init->handle == ~0) | 945 | if (init->handle == ~0) |
882 | return -EINVAL; | 946 | return -EINVAL; |
883 | 947 | ||
884 | grc = pgraph->grclass; | 948 | chan = nouveau_channel_get(dev, file_priv, init->channel); |
885 | while (grc->id) { | 949 | if (IS_ERR(chan)) |
886 | if (grc->id == init->class) | 950 | return PTR_ERR(chan); |
887 | break; | ||
888 | grc++; | ||
889 | } | ||
890 | 951 | ||
891 | if (!grc->id) { | 952 | if (nouveau_ramht_find(chan, init->handle)) { |
892 | NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); | 953 | ret = -EEXIST; |
893 | return -EPERM; | 954 | goto out; |
894 | } | 955 | } |
895 | 956 | ||
896 | if (nouveau_ramht_find(chan, init->handle)) | 957 | ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); |
897 | return -EEXIST; | ||
898 | |||
899 | if (!grc->software) | ||
900 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); | ||
901 | else | ||
902 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); | ||
903 | if (ret) { | 958 | if (ret) { |
904 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | 959 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", |
905 | ret, init->channel, init->handle); | 960 | ret, init->channel, init->handle); |
906 | return ret; | ||
907 | } | ||
908 | |||
909 | ret = nouveau_ramht_insert(chan, init->handle, gr); | ||
910 | nouveau_gpuobj_ref(NULL, &gr); | ||
911 | if (ret) { | ||
912 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", | ||
913 | ret, init->channel, init->handle); | ||
914 | return ret; | ||
915 | } | 961 | } |
916 | 962 | ||
917 | return 0; | 963 | out: |
964 | nouveau_channel_put(&chan); | ||
965 | return ret; | ||
918 | } | 966 | } |
919 | 967 | ||
920 | int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | 968 | int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, |
921 | struct drm_file *file_priv) | 969 | struct drm_file *file_priv) |
922 | { | 970 | { |
923 | struct drm_nouveau_gpuobj_free *objfree = data; | 971 | struct drm_nouveau_gpuobj_free *objfree = data; |
924 | struct nouveau_gpuobj *gpuobj; | ||
925 | struct nouveau_channel *chan; | 972 | struct nouveau_channel *chan; |
973 | int ret; | ||
926 | 974 | ||
927 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); | 975 | chan = nouveau_channel_get(dev, file_priv, objfree->channel); |
976 | if (IS_ERR(chan)) | ||
977 | return PTR_ERR(chan); | ||
928 | 978 | ||
929 | gpuobj = nouveau_ramht_find(chan, objfree->handle); | 979 | /* Synchronize with the user channel */ |
930 | if (!gpuobj) | 980 | nouveau_channel_idle(chan); |
931 | return -ENOENT; | ||
932 | 981 | ||
933 | nouveau_ramht_remove(chan, objfree->handle); | 982 | ret = nouveau_ramht_remove(chan, objfree->handle); |
934 | return 0; | 983 | nouveau_channel_put(&chan); |
984 | return ret; | ||
935 | } | 985 | } |
936 | 986 | ||
937 | u32 | 987 | u32 |
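
The two ioctls above replace the old NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro with an explicit, reference-counted acquire/release around the channel, funnelling every exit through a single out: label. A minimal sketch of the resulting pattern, built only from the calls visible in this hunk (the ioctl body itself is illustrative, not driver code):

	static int example_channel_ioctl(struct drm_device *dev, u32 chan_id,
					 struct drm_file *file_priv, u32 handle)
	{
		struct nouveau_channel *chan;
		int ret = 0;

		chan = nouveau_channel_get(dev, file_priv, chan_id);
		if (IS_ERR(chan))
			return PTR_ERR(chan);	/* bad id, dying channel, ... */

		if (nouveau_ramht_find(chan, handle)) {
			ret = -EEXIST;		/* handle already bound */
			goto out;
		}

		/* ... per-ioctl work, with the channel safely referenced ... */
	out:
		nouveau_channel_put(&chan);	/* drops the ref, NULLs chan */
		return ret;
	}
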
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 9f7b158f5825..d93814160bcf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_pm.h" | 28 | #include "nouveau_pm.h" |
29 | 29 | ||
30 | #ifdef CONFIG_ACPI | ||
31 | #include <linux/acpi.h> | ||
32 | #endif | ||
33 | #include <linux/power_supply.h> | ||
30 | #include <linux/hwmon.h> | 34 | #include <linux/hwmon.h> |
31 | #include <linux/hwmon-sysfs.h> | 35 | #include <linux/hwmon-sysfs.h> |
32 | 36 | ||
@@ -446,6 +450,25 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
446 | #endif | 450 | #endif |
447 | } | 451 | } |
448 | 452 | ||
453 | #ifdef CONFIG_ACPI | ||
454 | static int | ||
455 | nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) | ||
456 | { | ||
457 | struct drm_nouveau_private *dev_priv = | ||
458 | container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); | ||
459 | struct drm_device *dev = dev_priv->dev; | ||
460 | struct acpi_bus_event *entry = (struct acpi_bus_event *)data; | ||
461 | |||
462 | if (strcmp(entry->device_class, "ac_adapter") == 0) { | ||
463 | bool ac = power_supply_is_system_supplied(); | ||
464 | |||
465 | NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC"); | ||
466 | } | ||
467 | |||
468 | return NOTIFY_OK; | ||
469 | } | ||
470 | #endif | ||
471 | |||
449 | int | 472 | int |
450 | nouveau_pm_init(struct drm_device *dev) | 473 | nouveau_pm_init(struct drm_device *dev) |
451 | { | 474 | { |
@@ -485,6 +508,10 @@ nouveau_pm_init(struct drm_device *dev) | |||
485 | 508 | ||
486 | nouveau_sysfs_init(dev); | 509 | nouveau_sysfs_init(dev); |
487 | nouveau_hwmon_init(dev); | 510 | nouveau_hwmon_init(dev); |
511 | #ifdef CONFIG_ACPI | ||
512 | pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; | ||
513 | register_acpi_notifier(&pm->acpi_nb); | ||
514 | #endif | ||
488 | 515 | ||
489 | return 0; | 516 | return 0; |
490 | } | 517 | } |
@@ -503,6 +530,9 @@ nouveau_pm_fini(struct drm_device *dev) | |||
503 | nouveau_perf_fini(dev); | 530 | nouveau_perf_fini(dev); |
504 | nouveau_volt_fini(dev); | 531 | nouveau_volt_fini(dev); |
505 | 532 | ||
533 | #ifdef CONFIG_ACPI | ||
534 | unregister_acpi_notifier(&pm->acpi_nb); | ||
535 | #endif | ||
506 | nouveau_hwmon_fini(dev); | 536 | nouveau_hwmon_fini(dev); |
507 | nouveau_sysfs_fini(dev); | 537 | nouveau_sysfs_fini(dev); |
508 | } | 538 | } |
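
The ACPI hook added here is the standard embed-and-recover notifier pattern: the notifier_block lives inside the driver's private struct, and container_of() maps it back to its owner when the callback fires. A stripped-down sketch under that assumption (struct layout abbreviated; only calls appearing in the hunks above are used):

	#include <linux/acpi.h>
	#include <linux/notifier.h>
	#include <linux/power_supply.h>
	#include <linux/string.h>

	struct example_priv {
		struct notifier_block acpi_nb;	/* embedded, like engine.pm.acpi_nb */
	};

	static int example_acpi_event(struct notifier_block *nb,
				      unsigned long val, void *data)
	{
		struct example_priv *priv =
			container_of(nb, struct example_priv, acpi_nb);
		struct acpi_bus_event *entry = data;

		if (strcmp(entry->device_class, "ac_adapter") == 0) {
			bool ac = power_supply_is_system_supplied();
			/* adjust priv's power policy for AC vs. battery */
			(void)priv; (void)ac;
		}
		return NOTIFY_OK;	/* handled; let other listeners run too */
	}

Registration and teardown then mirror the init/fini hunks: set acpi_nb.notifier_call, call register_acpi_notifier() at init, and unregister_acpi_notifier() before teardown.
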
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 2d8580927ca4..bef3e6910418 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c | |||
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, | |||
104 | nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); | 104 | nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); |
105 | 105 | ||
106 | if (dev_priv->card_type < NV_40) { | 106 | if (dev_priv->card_type < NV_40) { |
107 | ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | | 107 | ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) | |
108 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | | 108 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | |
109 | (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); | 109 | (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); |
110 | } else | 110 | } else |
111 | if (dev_priv->card_type < NV_50) { | 111 | if (dev_priv->card_type < NV_50) { |
112 | ctx = (gpuobj->cinst >> 4) | | 112 | ctx = (gpuobj->pinst >> 4) | |
113 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | | 113 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | |
114 | (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); | 114 | (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); |
115 | } else { | 115 | } else { |
116 | if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { | 116 | if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { |
117 | ctx = (gpuobj->cinst << 10) | 2; | 117 | ctx = (gpuobj->cinst << 10) | chan->id; |
118 | } else { | 118 | } else { |
119 | ctx = (gpuobj->cinst >> 4) | | 119 | ctx = (gpuobj->cinst >> 4) | |
120 | ((gpuobj->engine << | 120 | ((gpuobj->engine << |
@@ -214,18 +214,19 @@ out: | |||
214 | spin_unlock_irqrestore(&chan->ramht->lock, flags); | 214 | spin_unlock_irqrestore(&chan->ramht->lock, flags); |
215 | } | 215 | } |
216 | 216 | ||
217 | void | 217 | int |
218 | nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) | 218 | nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) |
219 | { | 219 | { |
220 | struct nouveau_ramht_entry *entry; | 220 | struct nouveau_ramht_entry *entry; |
221 | 221 | ||
222 | entry = nouveau_ramht_remove_entry(chan, handle); | 222 | entry = nouveau_ramht_remove_entry(chan, handle); |
223 | if (!entry) | 223 | if (!entry) |
224 | return; | 224 | return -ENOENT; |
225 | 225 | ||
226 | nouveau_ramht_remove_hash(chan, entry->handle); | 226 | nouveau_ramht_remove_hash(chan, entry->handle); |
227 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); | 227 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); |
228 | kfree(entry); | 228 | kfree(entry); |
229 | return 0; | ||
229 | } | 230 | } |
230 | 231 | ||
231 | struct nouveau_gpuobj * | 232 | struct nouveau_gpuobj * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h index b79cb5e1a8f1..c82de98fee0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h | |||
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, | |||
48 | 48 | ||
49 | extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, | 49 | extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, |
50 | struct nouveau_gpuobj *); | 50 | struct nouveau_gpuobj *); |
51 | extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); | 51 | extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle); |
52 | extern struct nouveau_gpuobj * | 52 | extern struct nouveau_gpuobj * |
53 | nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); | 53 | nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); |
54 | 54 | ||
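
Changing nouveau_ramht_remove() from void to int lets a missing handle surface as -ENOENT rather than vanish silently; the gpuobj_free ioctl above now simply forwards that value. An illustrative caller:

	int ret = nouveau_ramht_remove(chan, handle);
	if (ret)	/* -ENOENT: no such handle on this channel */
		return ret;
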
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 1b42541ca9e5..04e8fb795269 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h | |||
@@ -45,6 +45,11 @@ | |||
45 | # define NV04_PFB_REF_CMD_REFRESH (1 << 0) | 45 | # define NV04_PFB_REF_CMD_REFRESH (1 << 0) |
46 | #define NV04_PFB_PRE 0x001002d4 | 46 | #define NV04_PFB_PRE 0x001002d4 |
47 | # define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) | 47 | # define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) |
48 | #define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i)) | ||
49 | # define NV20_PFB_ZCOMP_MODE_32 (4 << 24) | ||
50 | # define NV20_PFB_ZCOMP_EN (1 << 31) | ||
51 | # define NV25_PFB_ZCOMP_MODE_16 (1 << 20) | ||
52 | # define NV25_PFB_ZCOMP_MODE_32 (2 << 20) | ||
48 | #define NV10_PFB_CLOSE_PAGE2 0x0010033c | 53 | #define NV10_PFB_CLOSE_PAGE2 0x0010033c |
49 | #define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) | 54 | #define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) |
50 | #define NV40_PFB_TILE(i) (0x00100600 + (i*16)) | 55 | #define NV40_PFB_TILE(i) (0x00100600 + (i*16)) |
@@ -74,17 +79,6 @@ | |||
74 | # define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 | 79 | # define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 |
75 | # define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 | 80 | # define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 |
76 | 81 | ||
77 | /* DMA object defines */ | ||
78 | #define NV_DMA_ACCESS_RW 0 | ||
79 | #define NV_DMA_ACCESS_RO 1 | ||
80 | #define NV_DMA_ACCESS_WO 2 | ||
81 | #define NV_DMA_TARGET_VIDMEM 0 | ||
82 | #define NV_DMA_TARGET_PCI 2 | ||
83 | #define NV_DMA_TARGET_AGP 3 | ||
84 | /* The following is not a real value used by the card, it's changed by | ||
85 | * nouveau_object_dma_create */ | ||
86 | #define NV_DMA_TARGET_PCI_NONLINEAR 8 | ||
87 | |||
88 | /* Some object classes we care about in the drm */ | 82 | /* Some object classes we care about in the drm */ |
89 | #define NV_CLASS_DMA_FROM_MEMORY 0x00000002 | 83 | #define NV_CLASS_DMA_FROM_MEMORY 0x00000002 |
90 | #define NV_CLASS_DMA_TO_MEMORY 0x00000003 | 84 | #define NV_CLASS_DMA_TO_MEMORY 0x00000003 |
@@ -332,6 +326,7 @@ | |||
332 | #define NV04_PGRAPH_BSWIZZLE5 0x004006A0 | 326 | #define NV04_PGRAPH_BSWIZZLE5 0x004006A0 |
333 | #define NV03_PGRAPH_STATUS 0x004006B0 | 327 | #define NV03_PGRAPH_STATUS 0x004006B0 |
334 | #define NV04_PGRAPH_STATUS 0x00400700 | 328 | #define NV04_PGRAPH_STATUS 0x00400700 |
329 | # define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000 | ||
335 | #define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 | 330 | #define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 |
336 | #define NV04_PGRAPH_TRAPPED_DATA 0x00400708 | 331 | #define NV04_PGRAPH_TRAPPED_DATA 0x00400708 |
337 | #define NV04_PGRAPH_SURFACE 0x0040070C | 332 | #define NV04_PGRAPH_SURFACE 0x0040070C |
@@ -378,6 +373,7 @@ | |||
378 | #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) | 373 | #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) |
379 | #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) | 374 | #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) |
380 | #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) | 375 | #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) |
376 | #define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) | ||
381 | #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) | 377 | #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) |
382 | #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) | 378 | #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) |
383 | #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) | 379 | #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) |
@@ -714,31 +710,32 @@ | |||
714 | #define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 | 710 | #define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 |
715 | #define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 | 711 | #define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 |
716 | #define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 | 712 | #define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 |
717 | #define NV50_PDISPLAY_INTR_EN 0x0061002c | 713 | #define NV50_PDISPLAY_INTR_EN_0 0x00610028 |
718 | #define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c | 714 | #define NV50_PDISPLAY_INTR_EN_1 0x0061002c |
719 | #define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) | 715 | #define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c |
720 | #define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 | 716 | #define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) |
721 | #define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 | 717 | #define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004 |
722 | #define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 | 718 | #define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008 |
723 | #define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 | 719 | #define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010 |
724 | #define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 | 720 | #define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020 |
721 | #define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040 | ||
725 | #define NV50_PDISPLAY_UNK30_CTRL 0x00610030 | 722 | #define NV50_PDISPLAY_UNK30_CTRL 0x00610030 |
726 | #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 | 723 | #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 |
727 | #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 | 724 | #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 |
728 | #define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 | 725 | #define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 |
729 | #define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 | 726 | #define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080) |
730 | #define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 | 727 | #define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084) |
731 | #define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) | 728 | #define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200) |
732 | #define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 | 729 | #define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010 |
733 | #define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 | 730 | #define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000 |
734 | #define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 | 731 | #define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010 |
735 | #define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) | 732 | #define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204) |
736 | #define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 | 733 | #define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002 |
737 | #define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 | 734 | #define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000 |
738 | #define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 | 735 | #define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002 |
739 | #define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 | 736 | #define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001 |
740 | #define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) | 737 | #define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208) |
741 | #define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) | 738 | #define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c) |
742 | 739 | ||
743 | #define NV50_PDISPLAY_CURSOR 0x00610270 | 740 | #define NV50_PDISPLAY_CURSOR 0x00610270 |
744 | #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) | 741 | #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) |
@@ -746,15 +743,11 @@ | |||
746 | #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 | 743 | #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 |
747 | #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 | 744 | #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 |
748 | 745 | ||
749 | #define NV50_PDISPLAY_CTRL_STATE 0x00610300 | 746 | #define NV50_PDISPLAY_PIO_CTRL 0x00610300 |
750 | #define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 | 747 | #define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000 |
751 | #define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc | 748 | #define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc |
752 | #define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 | 749 | #define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001 |
753 | #define NV50_PDISPLAY_CTRL_VAL 0x00610304 | 750 | #define NV50_PDISPLAY_PIO_DATA 0x00610304 |
754 | #define NV50_PDISPLAY_UNK_380 0x00610380 | ||
755 | #define NV50_PDISPLAY_RAM_AMOUNT 0x00610384 | ||
756 | #define NV50_PDISPLAY_UNK_388 0x00610388 | ||
757 | #define NV50_PDISPLAY_UNK_38C 0x0061038c | ||
758 | 751 | ||
759 | #define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) | 752 | #define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) |
760 | #define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) | 753 | #define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) |
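
The indexed defines introduced in these hunks are plain address arithmetic. Taking the definitions at face value, the per-unit strides check out as follows (compile-time spot checks, not driver code):

	#define NV20_PFB_ZCOMP(i)		(0x00100300 + 4*(i))
	#define NV50_PDISPLAY_TRAPPED_ADDR(i)	((i) * 0x08 + 0x00610080)
	#define NV50_PDISPLAY_EVO_CTRL(i)	((i) * 0x10 + 0x00610200)

	_Static_assert(NV20_PFB_ZCOMP(3) == 0x0010030c, "4-byte ZCOMP stride");
	_Static_assert(NV50_PDISPLAY_TRAPPED_ADDR(1) == 0x00610088, "8-byte stride");
	_Static_assert(NV50_PDISPLAY_EVO_CTRL(2) == 0x00610220, "16-byte stride");
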
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index d4ac97007038..9a250eb53098 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be { | |||
14 | dma_addr_t *pages; | 14 | dma_addr_t *pages; |
15 | unsigned nr_pages; | 15 | unsigned nr_pages; |
16 | 16 | ||
17 | unsigned pte_start; | 17 | u64 offset; |
18 | bool bound; | 18 | bool bound; |
19 | }; | 19 | }; |
20 | 20 | ||
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be) | |||
74 | } | 74 | } |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline unsigned | ||
78 | nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset) | ||
79 | { | ||
80 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
81 | unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT); | ||
82 | |||
83 | if (dev_priv->card_type < NV_50) | ||
84 | return pte + 2; | ||
85 | |||
86 | return pte << 1; | ||
87 | } | ||
88 | |||
89 | static int | 77 | static int |
90 | nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) | 78 | nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) |
91 | { | 79 | { |
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) | |||
97 | 85 | ||
98 | NV_DEBUG(dev, "pg=0x%lx\n", mem->start); | 86 | NV_DEBUG(dev, "pg=0x%lx\n", mem->start); |
99 | 87 | ||
100 | pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); | 88 | nvbe->offset = mem->start << PAGE_SHIFT; |
101 | nvbe->pte_start = pte; | 89 | pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; |
102 | for (i = 0; i < nvbe->nr_pages; i++) { | 90 | for (i = 0; i < nvbe->nr_pages; i++) { |
103 | dma_addr_t dma_offset = nvbe->pages[i]; | 91 | dma_addr_t dma_offset = nvbe->pages[i]; |
104 | uint32_t offset_l = lower_32_bits(dma_offset); | 92 | uint32_t offset_l = lower_32_bits(dma_offset); |
105 | uint32_t offset_h = upper_32_bits(dma_offset); | ||
106 | |||
107 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { | ||
108 | if (dev_priv->card_type < NV_50) { | ||
109 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); | ||
110 | pte += 1; | ||
111 | } else { | ||
112 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21); | ||
113 | nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff); | ||
114 | pte += 2; | ||
115 | } | ||
116 | 93 | ||
94 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { | ||
95 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); | ||
117 | dma_offset += NV_CTXDMA_PAGE_SIZE; | 96 | dma_offset += NV_CTXDMA_PAGE_SIZE; |
118 | } | 97 | } |
119 | } | 98 | } |
120 | dev_priv->engine.instmem.flush(nvbe->dev); | ||
121 | |||
122 | if (dev_priv->card_type == NV_50) { | ||
123 | dev_priv->engine.fifo.tlb_flush(dev); | ||
124 | dev_priv->engine.graph.tlb_flush(dev); | ||
125 | } | ||
126 | 99 | ||
127 | nvbe->bound = true; | 100 | nvbe->bound = true; |
128 | return 0; | 101 | return 0; |
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be) | |||
142 | if (!nvbe->bound) | 115 | if (!nvbe->bound) |
143 | return 0; | 116 | return 0; |
144 | 117 | ||
145 | pte = nvbe->pte_start; | 118 | pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; |
146 | for (i = 0; i < nvbe->nr_pages; i++) { | 119 | for (i = 0; i < nvbe->nr_pages; i++) { |
147 | dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; | 120 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) |
148 | 121 | nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); | |
149 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { | ||
150 | if (dev_priv->card_type < NV_50) { | ||
151 | nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3); | ||
152 | pte += 1; | ||
153 | } else { | ||
154 | nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); | ||
155 | nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); | ||
156 | pte += 2; | ||
157 | } | ||
158 | |||
159 | dma_offset += NV_CTXDMA_PAGE_SIZE; | ||
160 | } | ||
161 | } | ||
162 | dev_priv->engine.instmem.flush(nvbe->dev); | ||
163 | |||
164 | if (dev_priv->card_type == NV_50) { | ||
165 | dev_priv->engine.fifo.tlb_flush(dev); | ||
166 | dev_priv->engine.graph.tlb_flush(dev); | ||
167 | } | 122 | } |
168 | 123 | ||
169 | nvbe->bound = false; | 124 | nvbe->bound = false; |
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be) | |||
186 | } | 141 | } |
187 | } | 142 | } |
188 | 143 | ||
144 | static int | ||
145 | nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) | ||
146 | { | ||
147 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | ||
148 | struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; | ||
149 | |||
150 | nvbe->offset = mem->start << PAGE_SHIFT; | ||
151 | |||
152 | nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset, | ||
153 | nvbe->nr_pages << PAGE_SHIFT, nvbe->pages); | ||
154 | nvbe->bound = true; | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static int | ||
159 | nv50_sgdma_unbind(struct ttm_backend *be) | ||
160 | { | ||
161 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | ||
162 | struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; | ||
163 | |||
164 | if (!nvbe->bound) | ||
165 | return 0; | ||
166 | |||
167 | nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset, | ||
168 | nvbe->nr_pages << PAGE_SHIFT); | ||
169 | nvbe->bound = false; | ||
170 | return 0; | ||
171 | } | ||
172 | |||
189 | static struct ttm_backend_func nouveau_sgdma_backend = { | 173 | static struct ttm_backend_func nouveau_sgdma_backend = { |
190 | .populate = nouveau_sgdma_populate, | 174 | .populate = nouveau_sgdma_populate, |
191 | .clear = nouveau_sgdma_clear, | 175 | .clear = nouveau_sgdma_clear, |
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = { | |||
194 | .destroy = nouveau_sgdma_destroy | 178 | .destroy = nouveau_sgdma_destroy |
195 | }; | 179 | }; |
196 | 180 | ||
181 | static struct ttm_backend_func nv50_sgdma_backend = { | ||
182 | .populate = nouveau_sgdma_populate, | ||
183 | .clear = nouveau_sgdma_clear, | ||
184 | .bind = nv50_sgdma_bind, | ||
185 | .unbind = nv50_sgdma_unbind, | ||
186 | .destroy = nouveau_sgdma_destroy | ||
187 | }; | ||
188 | |||
197 | struct ttm_backend * | 189 | struct ttm_backend * |
198 | nouveau_sgdma_init_ttm(struct drm_device *dev) | 190 | nouveau_sgdma_init_ttm(struct drm_device *dev) |
199 | { | 191 | { |
200 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 192 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
201 | struct nouveau_sgdma_be *nvbe; | 193 | struct nouveau_sgdma_be *nvbe; |
202 | 194 | ||
203 | if (!dev_priv->gart_info.sg_ctxdma) | ||
204 | return NULL; | ||
205 | |||
206 | nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); | 195 | nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); |
207 | if (!nvbe) | 196 | if (!nvbe) |
208 | return NULL; | 197 | return NULL; |
209 | 198 | ||
210 | nvbe->dev = dev; | 199 | nvbe->dev = dev; |
211 | 200 | ||
212 | nvbe->backend.func = &nouveau_sgdma_backend; | 201 | if (dev_priv->card_type < NV_50) |
213 | 202 | nvbe->backend.func = &nouveau_sgdma_backend; | |
203 | else | ||
204 | nvbe->backend.func = &nv50_sgdma_backend; | ||
214 | return &nvbe->backend; | 205 | return &nvbe->backend; |
215 | } | 206 | } |
216 | 207 | ||
@@ -218,7 +209,6 @@ int | |||
218 | nouveau_sgdma_init(struct drm_device *dev) | 209 | nouveau_sgdma_init(struct drm_device *dev) |
219 | { | 210 | { |
220 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 211 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
221 | struct pci_dev *pdev = dev->pdev; | ||
222 | struct nouveau_gpuobj *gpuobj = NULL; | 212 | struct nouveau_gpuobj *gpuobj = NULL; |
223 | uint32_t aper_size, obj_size; | 213 | uint32_t aper_size, obj_size; |
224 | int i, ret; | 214 | int i, ret; |
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
231 | 221 | ||
232 | obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; | 222 | obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; |
233 | obj_size += 8; /* ctxdma header */ | 223 | obj_size += 8; /* ctxdma header */ |
234 | } else { | ||
235 | /* 1 entire VM page table */ | ||
236 | aper_size = (512 * 1024 * 1024); | ||
237 | obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; | ||
238 | } | ||
239 | |||
240 | ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, | ||
241 | NVOBJ_FLAG_ZERO_ALLOC | | ||
242 | NVOBJ_FLAG_ZERO_FREE, &gpuobj); | ||
243 | if (ret) { | ||
244 | NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); | ||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | dev_priv->gart_info.sg_dummy_page = | ||
249 | alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO); | ||
250 | if (!dev_priv->gart_info.sg_dummy_page) { | ||
251 | nouveau_gpuobj_ref(NULL, &gpuobj); | ||
252 | return -ENOMEM; | ||
253 | } | ||
254 | 224 | ||
255 | set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); | 225 | ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, |
256 | dev_priv->gart_info.sg_dummy_bus = | 226 | NVOBJ_FLAG_ZERO_ALLOC | |
257 | pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, | 227 | NVOBJ_FLAG_ZERO_FREE, &gpuobj); |
258 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 228 | if (ret) { |
259 | if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { | 229 | NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); |
260 | nouveau_gpuobj_ref(NULL, &gpuobj); | 230 | return ret; |
261 | return -EFAULT; | 231 | } |
262 | } | ||
263 | 232 | ||
264 | if (dev_priv->card_type < NV_50) { | ||
265 | /* special case, allocated from global instmem heap so | ||
266 | * cinst is invalid, we use it on all channels though so | ||
267 | * cinst needs to be valid, set it the same as pinst | ||
268 | */ | ||
269 | gpuobj->cinst = gpuobj->pinst; | ||
270 | |||
271 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and | ||
272 | * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE | ||
273 | * on those cards? */ | ||
274 | nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | | 233 | nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | |
275 | (1 << 12) /* PT present */ | | 234 | (1 << 12) /* PT present */ | |
276 | (0 << 13) /* PT *not* linear */ | | 235 | (0 << 13) /* PT *not* linear */ | |
277 | (NV_DMA_ACCESS_RW << 14) | | 236 | (0 << 14) /* RW */ | |
278 | (NV_DMA_TARGET_PCI << 16)); | 237 | (2 << 16) /* PCI */); |
279 | nv_wo32(gpuobj, 4, aper_size - 1); | 238 | nv_wo32(gpuobj, 4, aper_size - 1); |
280 | for (i = 2; i < 2 + (aper_size >> 12); i++) { | 239 | for (i = 2; i < 2 + (aper_size >> 12); i++) |
281 | nv_wo32(gpuobj, i * 4, | 240 | nv_wo32(gpuobj, i * 4, 0x00000000); |
282 | dev_priv->gart_info.sg_dummy_bus | 3); | 241 | |
283 | } | 242 | dev_priv->gart_info.sg_ctxdma = gpuobj; |
284 | } else { | 243 | dev_priv->gart_info.aper_base = 0; |
285 | for (i = 0; i < obj_size; i += 8) { | 244 | dev_priv->gart_info.aper_size = aper_size; |
286 | nv_wo32(gpuobj, i + 0, 0x00000000); | 245 | } else |
287 | nv_wo32(gpuobj, i + 4, 0x00000000); | 246 | if (dev_priv->chan_vm) { |
288 | } | 247 | ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024, |
248 | 12, NV_MEM_ACCESS_RW, | ||
249 | &dev_priv->gart_info.vma); | ||
250 | if (ret) | ||
251 | return ret; | ||
252 | |||
253 | dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset; | ||
254 | dev_priv->gart_info.aper_size = 512 * 1024 * 1024; | ||
289 | } | 255 | } |
290 | dev_priv->engine.instmem.flush(dev); | ||
291 | 256 | ||
292 | dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; | 257 | dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; |
293 | dev_priv->gart_info.aper_base = 0; | ||
294 | dev_priv->gart_info.aper_size = aper_size; | ||
295 | dev_priv->gart_info.sg_ctxdma = gpuobj; | ||
296 | return 0; | 258 | return 0; |
297 | } | 259 | } |
298 | 260 | ||
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev) | |||
301 | { | 263 | { |
302 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 264 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
303 | 265 | ||
304 | if (dev_priv->gart_info.sg_dummy_page) { | ||
305 | pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, | ||
306 | NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
307 | unlock_page(dev_priv->gart_info.sg_dummy_page); | ||
308 | __free_page(dev_priv->gart_info.sg_dummy_page); | ||
309 | dev_priv->gart_info.sg_dummy_page = NULL; | ||
310 | dev_priv->gart_info.sg_dummy_bus = 0; | ||
311 | } | ||
312 | |||
313 | nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); | 266 | nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); |
267 | nouveau_vm_put(&dev_priv->gart_info.vma); | ||
314 | } | 268 | } |
315 | 269 | ||
316 | int | 270 | uint32_t |
317 | nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) | 271 | nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset) |
318 | { | 272 | { |
319 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 273 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
320 | struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; | 274 | struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; |
321 | int pte; | 275 | int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2; |
322 | 276 | ||
323 | pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; | 277 | BUG_ON(dev_priv->card_type >= NV_50); |
324 | if (dev_priv->card_type < NV_50) { | ||
325 | *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK; | ||
326 | return 0; | ||
327 | } | ||
328 | 278 | ||
329 | NV_ERROR(dev, "Unimplemented on NV50\n"); | 279 | return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) | |
330 | return -EINVAL; | 280 | (offset & NV_CTXDMA_PAGE_MASK); |
331 | } | 281 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 049f755567e5..8eac943e8fd2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
53 | engine->instmem.takedown = nv04_instmem_takedown; | 53 | engine->instmem.takedown = nv04_instmem_takedown; |
54 | engine->instmem.suspend = nv04_instmem_suspend; | 54 | engine->instmem.suspend = nv04_instmem_suspend; |
55 | engine->instmem.resume = nv04_instmem_resume; | 55 | engine->instmem.resume = nv04_instmem_resume; |
56 | engine->instmem.populate = nv04_instmem_populate; | 56 | engine->instmem.get = nv04_instmem_get; |
57 | engine->instmem.clear = nv04_instmem_clear; | 57 | engine->instmem.put = nv04_instmem_put; |
58 | engine->instmem.bind = nv04_instmem_bind; | 58 | engine->instmem.map = nv04_instmem_map; |
59 | engine->instmem.unbind = nv04_instmem_unbind; | 59 | engine->instmem.unmap = nv04_instmem_unmap; |
60 | engine->instmem.flush = nv04_instmem_flush; | 60 | engine->instmem.flush = nv04_instmem_flush; |
61 | engine->mc.init = nv04_mc_init; | 61 | engine->mc.init = nv04_mc_init; |
62 | engine->mc.takedown = nv04_mc_takedown; | 62 | engine->mc.takedown = nv04_mc_takedown; |
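
The instmem hooks are renamed the same way in every chipset case below, turning the interface into a get/put pair plus a map/unmap pair. A sketch of the resulting vtable shape; the member names come from this diff, while the argument lists and return types are not visible here and stand in as placeholders:

	struct example_instmem_engine {
		int  (*init)(struct drm_device *);
		void (*takedown)(struct drm_device *);
		int  (*suspend)(struct drm_device *);
		void (*resume)(struct drm_device *);
		int  (*get)();		/* was populate */
		void (*put)();		/* was clear    */
		int  (*map)();		/* was bind     */
		void (*unmap)();	/* was unbind   */
		void (*flush)(struct drm_device *);
	};
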
@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
65 | engine->timer.takedown = nv04_timer_takedown; | 65 | engine->timer.takedown = nv04_timer_takedown; |
66 | engine->fb.init = nv04_fb_init; | 66 | engine->fb.init = nv04_fb_init; |
67 | engine->fb.takedown = nv04_fb_takedown; | 67 | engine->fb.takedown = nv04_fb_takedown; |
68 | engine->graph.grclass = nv04_graph_grclass; | ||
69 | engine->graph.init = nv04_graph_init; | 68 | engine->graph.init = nv04_graph_init; |
70 | engine->graph.takedown = nv04_graph_takedown; | 69 | engine->graph.takedown = nv04_graph_takedown; |
71 | engine->graph.fifo_access = nv04_graph_fifo_access; | 70 | engine->graph.fifo_access = nv04_graph_fifo_access; |
@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
76 | engine->graph.unload_context = nv04_graph_unload_context; | 75 | engine->graph.unload_context = nv04_graph_unload_context; |
77 | engine->fifo.channels = 16; | 76 | engine->fifo.channels = 16; |
78 | engine->fifo.init = nv04_fifo_init; | 77 | engine->fifo.init = nv04_fifo_init; |
79 | engine->fifo.takedown = nouveau_stub_takedown; | 78 | engine->fifo.takedown = nv04_fifo_fini; |
80 | engine->fifo.disable = nv04_fifo_disable; | 79 | engine->fifo.disable = nv04_fifo_disable; |
81 | engine->fifo.enable = nv04_fifo_enable; | 80 | engine->fifo.enable = nv04_fifo_enable; |
82 | engine->fifo.reassign = nv04_fifo_reassign; | 81 | engine->fifo.reassign = nv04_fifo_reassign; |
@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
99 | engine->pm.clock_get = nv04_pm_clock_get; | 98 | engine->pm.clock_get = nv04_pm_clock_get; |
100 | engine->pm.clock_pre = nv04_pm_clock_pre; | 99 | engine->pm.clock_pre = nv04_pm_clock_pre; |
101 | engine->pm.clock_set = nv04_pm_clock_set; | 100 | engine->pm.clock_set = nv04_pm_clock_set; |
101 | engine->crypt.init = nouveau_stub_init; | ||
102 | engine->crypt.takedown = nouveau_stub_takedown; | ||
103 | engine->vram.init = nouveau_mem_detect; | ||
104 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
102 | break; | 105 | break; |
103 | case 0x10: | 106 | case 0x10: |
104 | engine->instmem.init = nv04_instmem_init; | 107 | engine->instmem.init = nv04_instmem_init; |
105 | engine->instmem.takedown = nv04_instmem_takedown; | 108 | engine->instmem.takedown = nv04_instmem_takedown; |
106 | engine->instmem.suspend = nv04_instmem_suspend; | 109 | engine->instmem.suspend = nv04_instmem_suspend; |
107 | engine->instmem.resume = nv04_instmem_resume; | 110 | engine->instmem.resume = nv04_instmem_resume; |
108 | engine->instmem.populate = nv04_instmem_populate; | 111 | engine->instmem.get = nv04_instmem_get; |
109 | engine->instmem.clear = nv04_instmem_clear; | 112 | engine->instmem.put = nv04_instmem_put; |
110 | engine->instmem.bind = nv04_instmem_bind; | 113 | engine->instmem.map = nv04_instmem_map; |
111 | engine->instmem.unbind = nv04_instmem_unbind; | 114 | engine->instmem.unmap = nv04_instmem_unmap; |
112 | engine->instmem.flush = nv04_instmem_flush; | 115 | engine->instmem.flush = nv04_instmem_flush; |
113 | engine->mc.init = nv04_mc_init; | 116 | engine->mc.init = nv04_mc_init; |
114 | engine->mc.takedown = nv04_mc_takedown; | 117 | engine->mc.takedown = nv04_mc_takedown; |
@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
117 | engine->timer.takedown = nv04_timer_takedown; | 120 | engine->timer.takedown = nv04_timer_takedown; |
118 | engine->fb.init = nv10_fb_init; | 121 | engine->fb.init = nv10_fb_init; |
119 | engine->fb.takedown = nv10_fb_takedown; | 122 | engine->fb.takedown = nv10_fb_takedown; |
120 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | 123 | engine->fb.init_tile_region = nv10_fb_init_tile_region; |
121 | engine->graph.grclass = nv10_graph_grclass; | 124 | engine->fb.set_tile_region = nv10_fb_set_tile_region; |
125 | engine->fb.free_tile_region = nv10_fb_free_tile_region; | ||
122 | engine->graph.init = nv10_graph_init; | 126 | engine->graph.init = nv10_graph_init; |
123 | engine->graph.takedown = nv10_graph_takedown; | 127 | engine->graph.takedown = nv10_graph_takedown; |
124 | engine->graph.channel = nv10_graph_channel; | 128 | engine->graph.channel = nv10_graph_channel; |
@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
127 | engine->graph.fifo_access = nv04_graph_fifo_access; | 131 | engine->graph.fifo_access = nv04_graph_fifo_access; |
128 | engine->graph.load_context = nv10_graph_load_context; | 132 | engine->graph.load_context = nv10_graph_load_context; |
129 | engine->graph.unload_context = nv10_graph_unload_context; | 133 | engine->graph.unload_context = nv10_graph_unload_context; |
130 | engine->graph.set_region_tiling = nv10_graph_set_region_tiling; | 134 | engine->graph.set_tile_region = nv10_graph_set_tile_region; |
131 | engine->fifo.channels = 32; | 135 | engine->fifo.channels = 32; |
132 | engine->fifo.init = nv10_fifo_init; | 136 | engine->fifo.init = nv10_fifo_init; |
133 | engine->fifo.takedown = nouveau_stub_takedown; | 137 | engine->fifo.takedown = nv04_fifo_fini; |
134 | engine->fifo.disable = nv04_fifo_disable; | 138 | engine->fifo.disable = nv04_fifo_disable; |
135 | engine->fifo.enable = nv04_fifo_enable; | 139 | engine->fifo.enable = nv04_fifo_enable; |
136 | engine->fifo.reassign = nv04_fifo_reassign; | 140 | engine->fifo.reassign = nv04_fifo_reassign; |
137 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | 141 | engine->fifo.cache_pull = nv04_fifo_cache_pull; |
138 | engine->fifo.channel_id = nv10_fifo_channel_id; | 142 | engine->fifo.channel_id = nv10_fifo_channel_id; |
139 | engine->fifo.create_context = nv10_fifo_create_context; | 143 | engine->fifo.create_context = nv10_fifo_create_context; |
140 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 144 | engine->fifo.destroy_context = nv04_fifo_destroy_context; |
141 | engine->fifo.load_context = nv10_fifo_load_context; | 145 | engine->fifo.load_context = nv10_fifo_load_context; |
142 | engine->fifo.unload_context = nv10_fifo_unload_context; | 146 | engine->fifo.unload_context = nv10_fifo_unload_context; |
143 | engine->display.early_init = nv04_display_early_init; | 147 | engine->display.early_init = nv04_display_early_init; |
@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
153 | engine->pm.clock_get = nv04_pm_clock_get; | 157 | engine->pm.clock_get = nv04_pm_clock_get; |
154 | engine->pm.clock_pre = nv04_pm_clock_pre; | 158 | engine->pm.clock_pre = nv04_pm_clock_pre; |
155 | engine->pm.clock_set = nv04_pm_clock_set; | 159 | engine->pm.clock_set = nv04_pm_clock_set; |
160 | engine->crypt.init = nouveau_stub_init; | ||
161 | engine->crypt.takedown = nouveau_stub_takedown; | ||
162 | engine->vram.init = nouveau_mem_detect; | ||
163 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
156 | break; | 164 | break; |
157 | case 0x20: | 165 | case 0x20: |
158 | engine->instmem.init = nv04_instmem_init; | 166 | engine->instmem.init = nv04_instmem_init; |
159 | engine->instmem.takedown = nv04_instmem_takedown; | 167 | engine->instmem.takedown = nv04_instmem_takedown; |
160 | engine->instmem.suspend = nv04_instmem_suspend; | 168 | engine->instmem.suspend = nv04_instmem_suspend; |
161 | engine->instmem.resume = nv04_instmem_resume; | 169 | engine->instmem.resume = nv04_instmem_resume; |
162 | engine->instmem.populate = nv04_instmem_populate; | 170 | engine->instmem.get = nv04_instmem_get; |
163 | engine->instmem.clear = nv04_instmem_clear; | 171 | engine->instmem.put = nv04_instmem_put; |
164 | engine->instmem.bind = nv04_instmem_bind; | 172 | engine->instmem.map = nv04_instmem_map; |
165 | engine->instmem.unbind = nv04_instmem_unbind; | 173 | engine->instmem.unmap = nv04_instmem_unmap; |
166 | engine->instmem.flush = nv04_instmem_flush; | 174 | engine->instmem.flush = nv04_instmem_flush; |
167 | engine->mc.init = nv04_mc_init; | 175 | engine->mc.init = nv04_mc_init; |
168 | engine->mc.takedown = nv04_mc_takedown; | 176 | engine->mc.takedown = nv04_mc_takedown; |
@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
171 | engine->timer.takedown = nv04_timer_takedown; | 179 | engine->timer.takedown = nv04_timer_takedown; |
172 | engine->fb.init = nv10_fb_init; | 180 | engine->fb.init = nv10_fb_init; |
173 | engine->fb.takedown = nv10_fb_takedown; | 181 | engine->fb.takedown = nv10_fb_takedown; |
174 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | 182 | engine->fb.init_tile_region = nv10_fb_init_tile_region; |
175 | engine->graph.grclass = nv20_graph_grclass; | 183 | engine->fb.set_tile_region = nv10_fb_set_tile_region; |
184 | engine->fb.free_tile_region = nv10_fb_free_tile_region; | ||
176 | engine->graph.init = nv20_graph_init; | 185 | engine->graph.init = nv20_graph_init; |
177 | engine->graph.takedown = nv20_graph_takedown; | 186 | engine->graph.takedown = nv20_graph_takedown; |
178 | engine->graph.channel = nv10_graph_channel; | 187 | engine->graph.channel = nv10_graph_channel; |
@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
181 | engine->graph.fifo_access = nv04_graph_fifo_access; | 190 | engine->graph.fifo_access = nv04_graph_fifo_access; |
182 | engine->graph.load_context = nv20_graph_load_context; | 191 | engine->graph.load_context = nv20_graph_load_context; |
183 | engine->graph.unload_context = nv20_graph_unload_context; | 192 | engine->graph.unload_context = nv20_graph_unload_context; |
184 | engine->graph.set_region_tiling = nv20_graph_set_region_tiling; | 193 | engine->graph.set_tile_region = nv20_graph_set_tile_region; |
185 | engine->fifo.channels = 32; | 194 | engine->fifo.channels = 32; |
186 | engine->fifo.init = nv10_fifo_init; | 195 | engine->fifo.init = nv10_fifo_init; |
187 | engine->fifo.takedown = nouveau_stub_takedown; | 196 | engine->fifo.takedown = nv04_fifo_fini; |
188 | engine->fifo.disable = nv04_fifo_disable; | 197 | engine->fifo.disable = nv04_fifo_disable; |
189 | engine->fifo.enable = nv04_fifo_enable; | 198 | engine->fifo.enable = nv04_fifo_enable; |
190 | engine->fifo.reassign = nv04_fifo_reassign; | 199 | engine->fifo.reassign = nv04_fifo_reassign; |
191 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | 200 | engine->fifo.cache_pull = nv04_fifo_cache_pull; |
192 | engine->fifo.channel_id = nv10_fifo_channel_id; | 201 | engine->fifo.channel_id = nv10_fifo_channel_id; |
193 | engine->fifo.create_context = nv10_fifo_create_context; | 202 | engine->fifo.create_context = nv10_fifo_create_context; |
194 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 203 | engine->fifo.destroy_context = nv04_fifo_destroy_context; |
195 | engine->fifo.load_context = nv10_fifo_load_context; | 204 | engine->fifo.load_context = nv10_fifo_load_context; |
196 | engine->fifo.unload_context = nv10_fifo_unload_context; | 205 | engine->fifo.unload_context = nv10_fifo_unload_context; |
197 | engine->display.early_init = nv04_display_early_init; | 206 | engine->display.early_init = nv04_display_early_init; |
@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
207 | engine->pm.clock_get = nv04_pm_clock_get; | 216 | engine->pm.clock_get = nv04_pm_clock_get; |
208 | engine->pm.clock_pre = nv04_pm_clock_pre; | 217 | engine->pm.clock_pre = nv04_pm_clock_pre; |
209 | engine->pm.clock_set = nv04_pm_clock_set; | 218 | engine->pm.clock_set = nv04_pm_clock_set; |
219 | engine->crypt.init = nouveau_stub_init; | ||
220 | engine->crypt.takedown = nouveau_stub_takedown; | ||
221 | engine->vram.init = nouveau_mem_detect; | ||
222 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
210 | break; | 223 | break; |
211 | case 0x30: | 224 | case 0x30: |
212 | engine->instmem.init = nv04_instmem_init; | 225 | engine->instmem.init = nv04_instmem_init; |
213 | engine->instmem.takedown = nv04_instmem_takedown; | 226 | engine->instmem.takedown = nv04_instmem_takedown; |
214 | engine->instmem.suspend = nv04_instmem_suspend; | 227 | engine->instmem.suspend = nv04_instmem_suspend; |
215 | engine->instmem.resume = nv04_instmem_resume; | 228 | engine->instmem.resume = nv04_instmem_resume; |
216 | engine->instmem.populate = nv04_instmem_populate; | 229 | engine->instmem.get = nv04_instmem_get; |
217 | engine->instmem.clear = nv04_instmem_clear; | 230 | engine->instmem.put = nv04_instmem_put; |
218 | engine->instmem.bind = nv04_instmem_bind; | 231 | engine->instmem.map = nv04_instmem_map; |
219 | engine->instmem.unbind = nv04_instmem_unbind; | 232 | engine->instmem.unmap = nv04_instmem_unmap; |
220 | engine->instmem.flush = nv04_instmem_flush; | 233 | engine->instmem.flush = nv04_instmem_flush; |
221 | engine->mc.init = nv04_mc_init; | 234 | engine->mc.init = nv04_mc_init; |
222 | engine->mc.takedown = nv04_mc_takedown; | 235 | engine->mc.takedown = nv04_mc_takedown; |
@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
225 | engine->timer.takedown = nv04_timer_takedown; | 238 | engine->timer.takedown = nv04_timer_takedown; |
226 | engine->fb.init = nv30_fb_init; | 239 | engine->fb.init = nv30_fb_init; |
227 | engine->fb.takedown = nv30_fb_takedown; | 240 | engine->fb.takedown = nv30_fb_takedown; |
228 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | 241 | engine->fb.init_tile_region = nv30_fb_init_tile_region; |
229 | engine->graph.grclass = nv30_graph_grclass; | 242 | engine->fb.set_tile_region = nv10_fb_set_tile_region; |
243 | engine->fb.free_tile_region = nv30_fb_free_tile_region; | ||
230 | engine->graph.init = nv30_graph_init; | 244 | engine->graph.init = nv30_graph_init; |
231 | engine->graph.takedown = nv20_graph_takedown; | 245 | engine->graph.takedown = nv20_graph_takedown; |
232 | engine->graph.fifo_access = nv04_graph_fifo_access; | 246 | engine->graph.fifo_access = nv04_graph_fifo_access; |
@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
235 | engine->graph.destroy_context = nv20_graph_destroy_context; | 249 | engine->graph.destroy_context = nv20_graph_destroy_context; |
236 | engine->graph.load_context = nv20_graph_load_context; | 250 | engine->graph.load_context = nv20_graph_load_context; |
237 | engine->graph.unload_context = nv20_graph_unload_context; | 251 | engine->graph.unload_context = nv20_graph_unload_context; |
238 | engine->graph.set_region_tiling = nv20_graph_set_region_tiling; | 252 | engine->graph.set_tile_region = nv20_graph_set_tile_region; |
239 | engine->fifo.channels = 32; | 253 | engine->fifo.channels = 32; |
240 | engine->fifo.init = nv10_fifo_init; | 254 | engine->fifo.init = nv10_fifo_init; |
241 | engine->fifo.takedown = nouveau_stub_takedown; | 255 | engine->fifo.takedown = nv04_fifo_fini; |
242 | engine->fifo.disable = nv04_fifo_disable; | 256 | engine->fifo.disable = nv04_fifo_disable; |
243 | engine->fifo.enable = nv04_fifo_enable; | 257 | engine->fifo.enable = nv04_fifo_enable; |
244 | engine->fifo.reassign = nv04_fifo_reassign; | 258 | engine->fifo.reassign = nv04_fifo_reassign; |
245 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | 259 | engine->fifo.cache_pull = nv04_fifo_cache_pull; |
246 | engine->fifo.channel_id = nv10_fifo_channel_id; | 260 | engine->fifo.channel_id = nv10_fifo_channel_id; |
247 | engine->fifo.create_context = nv10_fifo_create_context; | 261 | engine->fifo.create_context = nv10_fifo_create_context; |
248 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 262 | engine->fifo.destroy_context = nv04_fifo_destroy_context; |
249 | engine->fifo.load_context = nv10_fifo_load_context; | 263 | engine->fifo.load_context = nv10_fifo_load_context; |
250 | engine->fifo.unload_context = nv10_fifo_unload_context; | 264 | engine->fifo.unload_context = nv10_fifo_unload_context; |
251 | engine->display.early_init = nv04_display_early_init; | 265 | engine->display.early_init = nv04_display_early_init; |
@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
263 | engine->pm.clock_set = nv04_pm_clock_set; | 277 | engine->pm.clock_set = nv04_pm_clock_set; |
264 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 278 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
265 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 279 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
280 | engine->crypt.init = nouveau_stub_init; | ||
281 | engine->crypt.takedown = nouveau_stub_takedown; | ||
282 | engine->vram.init = nouveau_mem_detect; | ||
283 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
266 | break; | 284 | break; |
267 | case 0x40: | 285 | case 0x40: |
268 | case 0x60: | 286 | case 0x60: |
@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
270 | engine->instmem.takedown = nv04_instmem_takedown; | 288 | engine->instmem.takedown = nv04_instmem_takedown; |
271 | engine->instmem.suspend = nv04_instmem_suspend; | 289 | engine->instmem.suspend = nv04_instmem_suspend; |
272 | engine->instmem.resume = nv04_instmem_resume; | 290 | engine->instmem.resume = nv04_instmem_resume; |
273 | engine->instmem.populate = nv04_instmem_populate; | 291 | engine->instmem.get = nv04_instmem_get; |
274 | engine->instmem.clear = nv04_instmem_clear; | 292 | engine->instmem.put = nv04_instmem_put; |
275 | engine->instmem.bind = nv04_instmem_bind; | 293 | engine->instmem.map = nv04_instmem_map; |
276 | engine->instmem.unbind = nv04_instmem_unbind; | 294 | engine->instmem.unmap = nv04_instmem_unmap; |
277 | engine->instmem.flush = nv04_instmem_flush; | 295 | engine->instmem.flush = nv04_instmem_flush; |
278 | engine->mc.init = nv40_mc_init; | 296 | engine->mc.init = nv40_mc_init; |
279 | engine->mc.takedown = nv40_mc_takedown; | 297 | engine->mc.takedown = nv40_mc_takedown; |
@@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
282 | engine->timer.takedown = nv04_timer_takedown; | 300 | engine->timer.takedown = nv04_timer_takedown; |
283 | engine->fb.init = nv40_fb_init; | 301 | engine->fb.init = nv40_fb_init; |
284 | engine->fb.takedown = nv40_fb_takedown; | 302 | engine->fb.takedown = nv40_fb_takedown; |
285 | engine->fb.set_region_tiling = nv40_fb_set_region_tiling; | 303 | engine->fb.init_tile_region = nv30_fb_init_tile_region; |
286 | engine->graph.grclass = nv40_graph_grclass; | 304 | engine->fb.set_tile_region = nv40_fb_set_tile_region; |
305 | engine->fb.free_tile_region = nv30_fb_free_tile_region; | ||
287 | engine->graph.init = nv40_graph_init; | 306 | engine->graph.init = nv40_graph_init; |
288 | engine->graph.takedown = nv40_graph_takedown; | 307 | engine->graph.takedown = nv40_graph_takedown; |
289 | engine->graph.fifo_access = nv04_graph_fifo_access; | 308 | engine->graph.fifo_access = nv04_graph_fifo_access; |
@@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
292 | engine->graph.destroy_context = nv40_graph_destroy_context; | 311 | engine->graph.destroy_context = nv40_graph_destroy_context; |
293 | engine->graph.load_context = nv40_graph_load_context; | 312 | engine->graph.load_context = nv40_graph_load_context; |
294 | engine->graph.unload_context = nv40_graph_unload_context; | 313 | engine->graph.unload_context = nv40_graph_unload_context; |
295 | engine->graph.set_region_tiling = nv40_graph_set_region_tiling; | 314 | engine->graph.set_tile_region = nv40_graph_set_tile_region; |
296 | engine->fifo.channels = 32; | 315 | engine->fifo.channels = 32; |
297 | engine->fifo.init = nv40_fifo_init; | 316 | engine->fifo.init = nv40_fifo_init; |
298 | engine->fifo.takedown = nouveau_stub_takedown; | 317 | engine->fifo.takedown = nv04_fifo_fini; |
299 | engine->fifo.disable = nv04_fifo_disable; | 318 | engine->fifo.disable = nv04_fifo_disable; |
300 | engine->fifo.enable = nv04_fifo_enable; | 319 | engine->fifo.enable = nv04_fifo_enable; |
301 | engine->fifo.reassign = nv04_fifo_reassign; | 320 | engine->fifo.reassign = nv04_fifo_reassign; |
302 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | 321 | engine->fifo.cache_pull = nv04_fifo_cache_pull; |
303 | engine->fifo.channel_id = nv10_fifo_channel_id; | 322 | engine->fifo.channel_id = nv10_fifo_channel_id; |
304 | engine->fifo.create_context = nv40_fifo_create_context; | 323 | engine->fifo.create_context = nv40_fifo_create_context; |
305 | engine->fifo.destroy_context = nv40_fifo_destroy_context; | 324 | engine->fifo.destroy_context = nv04_fifo_destroy_context; |
306 | engine->fifo.load_context = nv40_fifo_load_context; | 325 | engine->fifo.load_context = nv40_fifo_load_context; |
307 | engine->fifo.unload_context = nv40_fifo_unload_context; | 326 | engine->fifo.unload_context = nv40_fifo_unload_context; |
308 | engine->display.early_init = nv04_display_early_init; | 327 | engine->display.early_init = nv04_display_early_init; |
@@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
321 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 340 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
322 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 341 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
323 | engine->pm.temp_get = nv40_temp_get; | 342 | engine->pm.temp_get = nv40_temp_get; |
343 | engine->crypt.init = nouveau_stub_init; | ||
344 | engine->crypt.takedown = nouveau_stub_takedown; | ||
345 | engine->vram.init = nouveau_mem_detect; | ||
346 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
324 | break; | 347 | break; |
325 | case 0x50: | 348 | case 0x50: |
326 | case 0x80: /* gotta love NVIDIA's consistency.. */ | 349 | case 0x80: /* gotta love NVIDIA's consistency.. */ |
@@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
330 | engine->instmem.takedown = nv50_instmem_takedown; | 353 | engine->instmem.takedown = nv50_instmem_takedown; |
331 | engine->instmem.suspend = nv50_instmem_suspend; | 354 | engine->instmem.suspend = nv50_instmem_suspend; |
332 | engine->instmem.resume = nv50_instmem_resume; | 355 | engine->instmem.resume = nv50_instmem_resume; |
333 | engine->instmem.populate = nv50_instmem_populate; | 356 | engine->instmem.get = nv50_instmem_get; |
334 | engine->instmem.clear = nv50_instmem_clear; | 357 | engine->instmem.put = nv50_instmem_put; |
335 | engine->instmem.bind = nv50_instmem_bind; | 358 | engine->instmem.map = nv50_instmem_map; |
336 | engine->instmem.unbind = nv50_instmem_unbind; | 359 | engine->instmem.unmap = nv50_instmem_unmap; |
337 | if (dev_priv->chipset == 0x50) | 360 | if (dev_priv->chipset == 0x50) |
338 | engine->instmem.flush = nv50_instmem_flush; | 361 | engine->instmem.flush = nv50_instmem_flush; |
339 | else | 362 | else |
@@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
345 | engine->timer.takedown = nv04_timer_takedown; | 368 | engine->timer.takedown = nv04_timer_takedown; |
346 | engine->fb.init = nv50_fb_init; | 369 | engine->fb.init = nv50_fb_init; |
347 | engine->fb.takedown = nv50_fb_takedown; | 370 | engine->fb.takedown = nv50_fb_takedown; |
348 | engine->graph.grclass = nv50_graph_grclass; | ||
349 | engine->graph.init = nv50_graph_init; | 371 | engine->graph.init = nv50_graph_init; |
350 | engine->graph.takedown = nv50_graph_takedown; | 372 | engine->graph.takedown = nv50_graph_takedown; |
351 | engine->graph.fifo_access = nv50_graph_fifo_access; | 373 | engine->graph.fifo_access = nv50_graph_fifo_access; |
@@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
381 | engine->display.init = nv50_display_init; | 403 | engine->display.init = nv50_display_init; |
382 | engine->display.destroy = nv50_display_destroy; | 404 | engine->display.destroy = nv50_display_destroy; |
383 | engine->gpio.init = nv50_gpio_init; | 405 | engine->gpio.init = nv50_gpio_init; |
384 | engine->gpio.takedown = nouveau_stub_takedown; | 406 | engine->gpio.takedown = nv50_gpio_fini; |
385 | engine->gpio.get = nv50_gpio_get; | 407 | engine->gpio.get = nv50_gpio_get; |
386 | engine->gpio.set = nv50_gpio_set; | 408 | engine->gpio.set = nv50_gpio_set; |
409 | engine->gpio.irq_register = nv50_gpio_irq_register; | ||
410 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; | ||
387 | engine->gpio.irq_enable = nv50_gpio_irq_enable; | 411 | engine->gpio.irq_enable = nv50_gpio_irq_enable; |
388 | switch (dev_priv->chipset) { | 412 | switch (dev_priv->chipset) { |
389 | case 0xa3: | 413 | case 0x84: |
390 | case 0xa5: | 414 | case 0x86: |
391 | case 0xa8: | 415 | case 0x92: |
392 | case 0xaf: | 416 | case 0x94: |
393 | engine->pm.clock_get = nva3_pm_clock_get; | 417 | case 0x96: |
394 | engine->pm.clock_pre = nva3_pm_clock_pre; | 418 | case 0x98: |
395 | engine->pm.clock_set = nva3_pm_clock_set; | 419 | case 0xa0: |
396 | break; | 420 | case 0xaa: |
397 | default: | 421 | case 0xac: |
422 | case 0x50: | ||
398 | engine->pm.clock_get = nv50_pm_clock_get; | 423 | engine->pm.clock_get = nv50_pm_clock_get; |
399 | engine->pm.clock_pre = nv50_pm_clock_pre; | 424 | engine->pm.clock_pre = nv50_pm_clock_pre; |
400 | engine->pm.clock_set = nv50_pm_clock_set; | 425 | engine->pm.clock_set = nv50_pm_clock_set; |
401 | break; | 426 | break; |
427 | default: | ||
428 | engine->pm.clock_get = nva3_pm_clock_get; | ||
429 | engine->pm.clock_pre = nva3_pm_clock_pre; | ||
430 | engine->pm.clock_set = nva3_pm_clock_set; | ||
431 | break; | ||
402 | } | 432 | } |
403 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 433 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
404 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 434 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
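
Note the inversion in the clock-ops switch above: instead of enumerating the NVA3-family chipsets, the code now enumerates the known NV50-family parts and lets default: select the newer nva3 clock routines, so unlisted future chipsets automatically take the newest path. The shape, with hypothetical helper names standing in for the assignments:

	switch (dev_priv->chipset) {
	case 0x50: case 0x84: case 0x86: case 0x92: case 0x94:
	case 0x96: case 0x98: case 0xa0: case 0xaa: case 0xac:
		use_nv50_clock_ops(engine);	/* known older parts */
		break;
	default:
		use_nva3_clock_ops(engine);	/* anything newer */
		break;
	}
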
@@ -406,16 +436,38 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
406 | engine->pm.temp_get = nv84_temp_get; | 436 | engine->pm.temp_get = nv84_temp_get; |
407 | else | 437 | else |
408 | engine->pm.temp_get = nv40_temp_get; | 438 | engine->pm.temp_get = nv40_temp_get; |
439 | switch (dev_priv->chipset) { | ||
440 | case 0x84: | ||
441 | case 0x86: | ||
442 | case 0x92: | ||
443 | case 0x94: | ||
444 | case 0x96: | ||
445 | case 0xa0: | ||
446 | engine->crypt.init = nv84_crypt_init; | ||
447 | engine->crypt.takedown = nv84_crypt_fini; | ||
448 | engine->crypt.create_context = nv84_crypt_create_context; | ||
449 | engine->crypt.destroy_context = nv84_crypt_destroy_context; | ||
450 | engine->crypt.tlb_flush = nv84_crypt_tlb_flush; | ||
451 | break; | ||
452 | default: | ||
453 | engine->crypt.init = nouveau_stub_init; | ||
454 | engine->crypt.takedown = nouveau_stub_takedown; | ||
455 | break; | ||
456 | } | ||
457 | engine->vram.init = nv50_vram_init; | ||
458 | engine->vram.get = nv50_vram_new; | ||
459 | engine->vram.put = nv50_vram_del; | ||
460 | engine->vram.flags_valid = nv50_vram_flags_valid; | ||
409 | break; | 461 | break; |
410 | case 0xC0: | 462 | case 0xC0: |
411 | engine->instmem.init = nvc0_instmem_init; | 463 | engine->instmem.init = nvc0_instmem_init; |
412 | engine->instmem.takedown = nvc0_instmem_takedown; | 464 | engine->instmem.takedown = nvc0_instmem_takedown; |
413 | engine->instmem.suspend = nvc0_instmem_suspend; | 465 | engine->instmem.suspend = nvc0_instmem_suspend; |
414 | engine->instmem.resume = nvc0_instmem_resume; | 466 | engine->instmem.resume = nvc0_instmem_resume; |
415 | engine->instmem.populate = nvc0_instmem_populate; | 467 | engine->instmem.get = nvc0_instmem_get; |
416 | engine->instmem.clear = nvc0_instmem_clear; | 468 | engine->instmem.put = nvc0_instmem_put; |
417 | engine->instmem.bind = nvc0_instmem_bind; | 469 | engine->instmem.map = nvc0_instmem_map; |
418 | engine->instmem.unbind = nvc0_instmem_unbind; | 470 | engine->instmem.unmap = nvc0_instmem_unmap; |
419 | engine->instmem.flush = nvc0_instmem_flush; | 471 | engine->instmem.flush = nvc0_instmem_flush; |
420 | engine->mc.init = nv50_mc_init; | 472 | engine->mc.init = nv50_mc_init; |
421 | engine->mc.takedown = nv50_mc_takedown; | 473 | engine->mc.takedown = nv50_mc_takedown; |
@@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
424 | engine->timer.takedown = nv04_timer_takedown; | 476 | engine->timer.takedown = nv04_timer_takedown; |
425 | engine->fb.init = nvc0_fb_init; | 477 | engine->fb.init = nvc0_fb_init; |
426 | engine->fb.takedown = nvc0_fb_takedown; | 478 | engine->fb.takedown = nvc0_fb_takedown; |
427 | engine->graph.grclass = NULL; //nvc0_graph_grclass; | ||
428 | engine->graph.init = nvc0_graph_init; | 479 | engine->graph.init = nvc0_graph_init; |
429 | engine->graph.takedown = nvc0_graph_takedown; | 480 | engine->graph.takedown = nvc0_graph_takedown; |
430 | engine->graph.fifo_access = nvc0_graph_fifo_access; | 481 | engine->graph.fifo_access = nvc0_graph_fifo_access; |
@@ -453,7 +504,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
453 | engine->gpio.takedown = nouveau_stub_takedown; | 504 | engine->gpio.takedown = nouveau_stub_takedown; |
454 | engine->gpio.get = nv50_gpio_get; | 505 | engine->gpio.get = nv50_gpio_get; |
455 | engine->gpio.set = nv50_gpio_set; | 506 | engine->gpio.set = nv50_gpio_set; |
507 | engine->gpio.irq_register = nv50_gpio_irq_register; | ||
508 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; | ||
456 | engine->gpio.irq_enable = nv50_gpio_irq_enable; | 509 | engine->gpio.irq_enable = nv50_gpio_irq_enable; |
510 | engine->crypt.init = nouveau_stub_init; | ||
511 | engine->crypt.takedown = nouveau_stub_takedown; | ||
512 | engine->vram.init = nouveau_mem_detect; | ||
513 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
457 | break; | 514 | break; |
458 | default: | 515 | default: |
459 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); | 516 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); |
@@ -495,7 +552,7 @@ nouveau_card_init_channel(struct drm_device *dev) | |||
495 | 552 | ||
496 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, | 553 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, |
497 | 0, dev_priv->vram_size, | 554 | 0, dev_priv->vram_size, |
498 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, | 555 | NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, |
499 | &gpuobj); | 556 | &gpuobj); |
500 | if (ret) | 557 | if (ret) |
501 | goto out_err; | 558 | goto out_err; |
@@ -505,9 +562,10 @@ nouveau_card_init_channel(struct drm_device *dev) | |||
505 | if (ret) | 562 | if (ret) |
506 | goto out_err; | 563 | goto out_err; |
507 | 564 | ||
508 | ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, | 565 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, |
509 | dev_priv->gart_info.aper_size, | 566 | 0, dev_priv->gart_info.aper_size, |
510 | NV_DMA_ACCESS_RW, &gpuobj, NULL); | 567 | NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART, |
568 | &gpuobj); | ||
511 | if (ret) | 569 | if (ret) |
512 | goto out_err; | 570 | goto out_err; |
513 | 571 | ||
@@ -516,11 +574,11 @@ nouveau_card_init_channel(struct drm_device *dev) | |||
516 | if (ret) | 574 | if (ret) |
517 | goto out_err; | 575 | goto out_err; |
518 | 576 | ||
577 | mutex_unlock(&dev_priv->channel->mutex); | ||
519 | return 0; | 578 | return 0; |
520 | 579 | ||
521 | out_err: | 580 | out_err: |
522 | nouveau_channel_free(dev_priv->channel); | 581 | nouveau_channel_put(&dev_priv->channel); |
523 | dev_priv->channel = NULL; | ||
524 | return ret; | 582 | return ret; |
525 | } | 583 | } |
526 | 584 | ||
@@ -567,6 +625,8 @@ nouveau_card_init(struct drm_device *dev) | |||
567 | if (ret) | 625 | if (ret) |
568 | goto out; | 626 | goto out; |
569 | engine = &dev_priv->engine; | 627 | engine = &dev_priv->engine; |
628 | spin_lock_init(&dev_priv->channels.lock); | ||
629 | spin_lock_init(&dev_priv->tile.lock); | ||
570 | spin_lock_init(&dev_priv->context_switch_lock); | 630 | spin_lock_init(&dev_priv->context_switch_lock); |
571 | 631 | ||
572 | /* Make the CRTCs and I2C buses accessible */ | 632 | /* Make the CRTCs and I2C buses accessible */ |
@@ -625,26 +685,28 @@ nouveau_card_init(struct drm_device *dev) | |||
625 | if (ret) | 685 | if (ret) |
626 | goto out_fb; | 686 | goto out_fb; |
627 | 687 | ||
688 | /* PCRYPT */ | ||
689 | ret = engine->crypt.init(dev); | ||
690 | if (ret) | ||
691 | goto out_graph; | ||
692 | |||
628 | /* PFIFO */ | 693 | /* PFIFO */ |
629 | ret = engine->fifo.init(dev); | 694 | ret = engine->fifo.init(dev); |
630 | if (ret) | 695 | if (ret) |
631 | goto out_graph; | 696 | goto out_crypt; |
632 | } | 697 | } |
633 | 698 | ||
634 | ret = engine->display.create(dev); | 699 | ret = engine->display.create(dev); |
635 | if (ret) | 700 | if (ret) |
636 | goto out_fifo; | 701 | goto out_fifo; |
637 | 702 | ||
638 | /* this calls irq_preinstall, registers the irq handler | 703 | ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
639 | * and calls irq_postinstall | ||
640 | */ | ||
641 | ret = drm_irq_install(dev); | ||
642 | if (ret) | 704 | if (ret) |
643 | goto out_display; | 705 | goto out_vblank; |
644 | 706 | ||
645 | ret = drm_vblank_init(dev, 0); | 707 | ret = nouveau_irq_init(dev); |
646 | if (ret) | 708 | if (ret) |
647 | goto out_irq; | 709 | goto out_vblank; |
648 | 710 | ||
649 | /* what about PVIDEO/PCRTC/PRAMDAC etc? */ | 711 | /* what about PVIDEO/PCRTC/PRAMDAC etc? */ |
650 | 712 | ||
@@ -669,12 +731,16 @@ nouveau_card_init(struct drm_device *dev) | |||
669 | out_fence: | 731 | out_fence: |
670 | nouveau_fence_fini(dev); | 732 | nouveau_fence_fini(dev); |
671 | out_irq: | 733 | out_irq: |
672 | drm_irq_uninstall(dev); | 734 | nouveau_irq_fini(dev); |
673 | out_display: | 735 | out_vblank: |
736 | drm_vblank_cleanup(dev); | ||
674 | engine->display.destroy(dev); | 737 | engine->display.destroy(dev); |
675 | out_fifo: | 738 | out_fifo: |
676 | if (!nouveau_noaccel) | 739 | if (!nouveau_noaccel) |
677 | engine->fifo.takedown(dev); | 740 | engine->fifo.takedown(dev); |
741 | out_crypt: | ||
742 | if (!nouveau_noaccel) | ||
743 | engine->crypt.takedown(dev); | ||
678 | out_graph: | 744 | out_graph: |
679 | if (!nouveau_noaccel) | 745 | if (!nouveau_noaccel) |
680 | engine->graph.takedown(dev); | 746 | engine->graph.takedown(dev); |
@@ -713,12 +779,12 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
713 | 779 | ||
714 | if (!engine->graph.accel_blocked) { | 780 | if (!engine->graph.accel_blocked) { |
715 | nouveau_fence_fini(dev); | 781 | nouveau_fence_fini(dev); |
716 | nouveau_channel_free(dev_priv->channel); | 782 | nouveau_channel_put_unlocked(&dev_priv->channel); |
717 | dev_priv->channel = NULL; | ||
718 | } | 783 | } |
719 | 784 | ||
720 | if (!nouveau_noaccel) { | 785 | if (!nouveau_noaccel) { |
721 | engine->fifo.takedown(dev); | 786 | engine->fifo.takedown(dev); |
787 | engine->crypt.takedown(dev); | ||
722 | engine->graph.takedown(dev); | 788 | engine->graph.takedown(dev); |
723 | } | 789 | } |
724 | engine->fb.takedown(dev); | 790 | engine->fb.takedown(dev); |
@@ -737,7 +803,8 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
737 | nouveau_gpuobj_takedown(dev); | 803 | nouveau_gpuobj_takedown(dev); |
738 | nouveau_mem_vram_fini(dev); | 804 | nouveau_mem_vram_fini(dev); |
739 | 805 | ||
740 | drm_irq_uninstall(dev); | 806 | nouveau_irq_fini(dev); |
807 | drm_vblank_cleanup(dev); | ||
741 | 808 | ||
742 | nouveau_pm_fini(dev); | 809 | nouveau_pm_fini(dev); |
743 | nouveau_bios_takedown(dev); | 810 | nouveau_bios_takedown(dev); |
@@ -1024,21 +1091,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1024 | else | 1091 | else |
1025 | getparam->value = NV_PCI; | 1092 | getparam->value = NV_PCI; |
1026 | break; | 1093 | break; |
1027 | case NOUVEAU_GETPARAM_FB_PHYSICAL: | ||
1028 | getparam->value = dev_priv->fb_phys; | ||
1029 | break; | ||
1030 | case NOUVEAU_GETPARAM_AGP_PHYSICAL: | ||
1031 | getparam->value = dev_priv->gart_info.aper_base; | ||
1032 | break; | ||
1033 | case NOUVEAU_GETPARAM_PCI_PHYSICAL: | ||
1034 | if (dev->sg) { | ||
1035 | getparam->value = (unsigned long)dev->sg->virtual; | ||
1036 | } else { | ||
1037 | NV_ERROR(dev, "Requested PCIGART address, " | ||
1038 | "while no PCIGART was created\n"); | ||
1039 | return -EINVAL; | ||
1040 | } | ||
1041 | break; | ||
1042 | case NOUVEAU_GETPARAM_FB_SIZE: | 1094 | case NOUVEAU_GETPARAM_FB_SIZE: |
1043 | getparam->value = dev_priv->fb_available_size; | 1095 | getparam->value = dev_priv->fb_available_size; |
1044 | break; | 1096 | break; |
@@ -1046,7 +1098,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1046 | getparam->value = dev_priv->gart_info.aper_size; | 1098 | getparam->value = dev_priv->gart_info.aper_size; |
1047 | break; | 1099 | break; |
1048 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: | 1100 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: |
1049 | getparam->value = dev_priv->vm_vram_base; | 1101 | getparam->value = 0; /* deprecated */ |
1050 | break; | 1102 | break; |
1051 | case NOUVEAU_GETPARAM_PTIMER_TIME: | 1103 | case NOUVEAU_GETPARAM_PTIMER_TIME: |
1052 | getparam->value = dev_priv->engine.timer.read(dev); | 1104 | getparam->value = dev_priv->engine.timer.read(dev); |
@@ -1054,6 +1106,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1054 | case NOUVEAU_GETPARAM_HAS_BO_USAGE: | 1106 | case NOUVEAU_GETPARAM_HAS_BO_USAGE: |
1055 | getparam->value = 1; | 1107 | getparam->value = 1; |
1056 | break; | 1108 | break; |
1109 | case NOUVEAU_GETPARAM_HAS_PAGEFLIP: | ||
1110 | getparam->value = (dev_priv->card_type < NV_50); | ||
1111 | break; | ||
1057 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | 1112 | case NOUVEAU_GETPARAM_GRAPH_UNITS: |
1058 | /* NV40 and NV50 versions are quite different, but register | 1113 | /* NV40 and NV50 versions are quite different, but register |
1059 | * address is the same. User is supposed to know the card | 1114 | * address is the same. User is supposed to know the card |
@@ -1087,8 +1142,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, | |||
1087 | } | 1142 | } |
1088 | 1143 | ||
1089 | /* Wait until (value(reg) & mask) == val, up until timeout has hit */ | 1144 | /* Wait until (value(reg) & mask) == val, up until timeout has hit */ |
1090 | bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, | 1145 | bool |
1091 | uint32_t reg, uint32_t mask, uint32_t val) | 1146 | nouveau_wait_eq(struct drm_device *dev, uint64_t timeout, |
1147 | uint32_t reg, uint32_t mask, uint32_t val) | ||
1092 | { | 1148 | { |
1093 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 1149 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1094 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | 1150 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; |
@@ -1102,10 +1158,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, | |||
1102 | return false; | 1158 | return false; |
1103 | } | 1159 | } |
1104 | 1160 | ||
1161 | /* Wait until (value(reg) & mask) != val, up until timeout has hit */ | ||
1162 | bool | ||
1163 | nouveau_wait_ne(struct drm_device *dev, uint64_t timeout, | ||
1164 | uint32_t reg, uint32_t mask, uint32_t val) | ||
1165 | { | ||
1166 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1167 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
1168 | uint64_t start = ptimer->read(dev); | ||
1169 | |||
1170 | do { | ||
1171 | if ((nv_rd32(dev, reg) & mask) != val) | ||
1172 | return true; | ||
1173 | } while (ptimer->read(dev) - start < timeout); | ||
1174 | |||
1175 | return false; | ||
1176 | } | ||
1177 | |||
1105 | /* Waits for PGRAPH to go completely idle */ | 1178 | /* Waits for PGRAPH to go completely idle */ |
1106 | bool nouveau_wait_for_idle(struct drm_device *dev) | 1179 | bool nouveau_wait_for_idle(struct drm_device *dev) |
1107 | { | 1180 | { |
1108 | if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { | 1181 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1182 | uint32_t mask = ~0; | ||
1183 | |||
1184 | if (dev_priv->card_type == NV_40) | ||
1185 | mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; | ||
1186 | |||
1187 | if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) { | ||
1109 | NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", | 1188 | NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", |
1110 | nv_rd32(dev, NV04_PGRAPH_STATUS)); | 1189 | nv_rd32(dev, NV04_PGRAPH_STATUS)); |
1111 | return false; | 1190 | return false; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c new file mode 100644 index 000000000000..fbe0fb13bc1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Nouveau Project | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include <linux/ratelimit.h> | ||
29 | |||
30 | #include "nouveau_util.h" | ||
31 | |||
32 | static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); | ||
33 | |||
34 | void | ||
35 | nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) | ||
36 | { | ||
37 | while (bf->name) { | ||
38 | if (value & bf->mask) { | ||
39 | printk(" %s", bf->name); | ||
40 | value &= ~bf->mask; | ||
41 | } | ||
42 | |||
43 | bf++; | ||
44 | } | ||
45 | |||
46 | if (value) | ||
47 | printk(" (unknown bits 0x%08x)", value); | ||
48 | } | ||
49 | |||
50 | void | ||
51 | nouveau_enum_print(const struct nouveau_enum *en, u32 value) | ||
52 | { | ||
53 | while (en->name) { | ||
54 | if (value == en->value) { | ||
55 | printk("%s", en->name); | ||
56 | return; | ||
57 | } | ||
58 | |||
59 | en++; | ||
60 | } | ||
61 | |||
62 | printk("(unknown enum 0x%08x)", value); | ||
63 | } | ||
64 | |||
65 | int | ||
66 | nouveau_ratelimit(void) | ||
67 | { | ||
68 | return __ratelimit(&nouveau_ratelimit_state); | ||
69 | } | ||
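Usage sketch for the helpers above: an interrupt handler builds a sentinel-terminated table and, under the shared ratelimit, prints the decoded status bits. The table contents and register offset below are illustrative, not real hardware definitions:

	/* Illustrative decode table; the masks and names are made up. */
	static const struct nouveau_bitfield example_intr[] = {
		{ 0x00000001, "VBLANK" },
		{ 0x00000010, "DMA_ERROR" },
		{}	/* sentinel: nouveau_bitfield_print() stops at name == NULL */
	};

	static void example_decode_intr(struct drm_device *dev)
	{
		u32 stat = nv_rd32(dev, 0x100100);	/* made-up register */

		if (nouveau_ratelimit()) {
			printk(KERN_INFO "example intr:");
			nouveau_bitfield_print(example_intr, stat);
			printk("\n");
		}
	}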
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h new file mode 100644 index 000000000000..d9ceaea26f4b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Nouveau Project | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #ifndef __NOUVEAU_UTIL_H__ | ||
29 | #define __NOUVEAU_UTIL_H__ | ||
30 | |||
31 | struct nouveau_bitfield { | ||
32 | u32 mask; | ||
33 | const char *name; | ||
34 | }; | ||
35 | |||
36 | struct nouveau_enum { | ||
37 | u32 value; | ||
38 | const char *name; | ||
39 | }; | ||
40 | |||
41 | void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value); | ||
42 | void nouveau_enum_print(const struct nouveau_enum *, u32 value); | ||
43 | int nouveau_ratelimit(void); | ||
44 | |||
45 | #endif | ||
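nouveau_enum_print() is the single-value counterpart: it prints the name matching an exact code, or an "unknown enum" fallback otherwise. A sketch with made-up error codes:

	/* Illustrative table; the codes and names are not real hardware values. */
	static const struct nouveau_enum example_errors[] = {
		{ 0x01, "PROTECTION_FAULT" },
		{ 0x02, "ILLEGAL_MTHD" },
		{}	/* sentinel */
	};

	static void example_report(u32 code)
	{
		printk(KERN_ERR "example error: ");
		nouveau_enum_print(example_errors, code);
		printk("\n");
	}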
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c new file mode 100644 index 000000000000..07ab1749cf7d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c | |||
@@ -0,0 +1,421 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
30 | void | ||
31 | nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) | ||
32 | { | ||
33 | struct nouveau_vm *vm = vma->vm; | ||
34 | struct nouveau_mm_node *r; | ||
35 | u32 offset = vma->node->offset + (delta >> 12); | ||
36 | u32 bits = vma->node->type - 12; | ||
37 | u32 pde = (offset >> vm->pgt_bits) - vm->fpde; | ||
38 | u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; | ||
39 | u32 max = 1 << (vm->pgt_bits - bits); | ||
40 | u32 end, len; | ||
41 | |||
42 | list_for_each_entry(r, &vram->regions, rl_entry) { | ||
43 | u64 phys = (u64)r->offset << 12; | ||
44 | u32 num = r->length >> bits; | ||
45 | |||
46 | while (num) { | ||
47 | struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; | ||
48 | |||
49 | end = (pte + num); | ||
50 | if (unlikely(end >= max)) | ||
51 | end = max; | ||
52 | len = end - pte; | ||
53 | |||
54 | vm->map(vma, pgt, vram, pte, len, phys); | ||
55 | |||
56 | num -= len; | ||
57 | pte += len; | ||
58 | if (unlikely(end >= max)) { | ||
59 | pde++; | ||
60 | pte = 0; | ||
61 | } | ||
62 | } | ||
63 | } | ||
64 | |||
65 | vm->flush(vm); | ||
66 | } | ||
67 | |||
68 | void | ||
69 | nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram) | ||
70 | { | ||
71 | nouveau_vm_map_at(vma, 0, vram); | ||
72 | } | ||
73 | |||
74 | void | ||
75 | nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, | ||
76 | dma_addr_t *list) | ||
77 | { | ||
78 | struct nouveau_vm *vm = vma->vm; | ||
79 | u32 offset = vma->node->offset + (delta >> 12); | ||
80 | u32 bits = vma->node->type - 12; | ||
81 | u32 num = length >> vma->node->type; | ||
82 | u32 pde = (offset >> vm->pgt_bits) - vm->fpde; | ||
83 | u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; | ||
84 | u32 max = 1 << (vm->pgt_bits - bits); | ||
85 | u32 end, len; | ||
86 | |||
87 | while (num) { | ||
88 | struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; | ||
89 | |||
90 | end = (pte + num); | ||
91 | if (unlikely(end >= max)) | ||
92 | end = max; | ||
93 | len = end - pte; | ||
94 | |||
95 | vm->map_sg(vma, pgt, pte, list, len); | ||
96 | |||
97 | num -= len; | ||
98 | pte += len; | ||
99 | list += len; | ||
100 | if (unlikely(end >= max)) { | ||
101 | pde++; | ||
102 | pte = 0; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | vm->flush(vm); | ||
107 | } | ||
108 | |||
109 | void | ||
110 | nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) | ||
111 | { | ||
112 | struct nouveau_vm *vm = vma->vm; | ||
113 | u32 offset = vma->node->offset + (delta >> 12); | ||
114 | u32 bits = vma->node->type - 12; | ||
115 | u32 num = length >> vma->node->type; | ||
116 | u32 pde = (offset >> vm->pgt_bits) - vm->fpde; | ||
117 | u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; | ||
118 | u32 max = 1 << (vm->pgt_bits - bits); | ||
119 | u32 end, len; | ||
120 | |||
121 | while (num) { | ||
122 | struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; | ||
123 | |||
124 | end = (pte + num); | ||
125 | if (unlikely(end >= max)) | ||
126 | end = max; | ||
127 | len = end - pte; | ||
128 | |||
129 | vm->unmap(pgt, pte, len); | ||
130 | |||
131 | num -= len; | ||
132 | pte += len; | ||
133 | if (unlikely(end >= max)) { | ||
134 | pde++; | ||
135 | pte = 0; | ||
136 | } | ||
137 | } | ||
138 | |||
139 | vm->flush(vm); | ||
140 | } | ||
141 | |||
142 | void | ||
143 | nouveau_vm_unmap(struct nouveau_vma *vma) | ||
144 | { | ||
145 | nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); | ||
146 | } | ||
147 | |||
148 | static void | ||
149 | nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde) | ||
150 | { | ||
151 | struct nouveau_vm_pgd *vpgd; | ||
152 | struct nouveau_vm_pgt *vpgt; | ||
153 | struct nouveau_gpuobj *pgt; | ||
154 | u32 pde; | ||
155 | |||
156 | for (pde = fpde; pde <= lpde; pde++) { | ||
157 | vpgt = &vm->pgt[pde - vm->fpde]; | ||
158 | if (--vpgt->refcount) | ||
159 | continue; | ||
160 | |||
161 | list_for_each_entry(vpgd, &vm->pgd_list, head) { | ||
162 | vm->unmap_pgt(vpgd->obj, pde); | ||
163 | } | ||
164 | |||
165 | pgt = vpgt->obj; | ||
166 | vpgt->obj = NULL; | ||
167 | |||
168 | mutex_unlock(&vm->mm->mutex); | ||
169 | nouveau_gpuobj_ref(NULL, &pgt); | ||
170 | mutex_lock(&vm->mm->mutex); | ||
171 | } | ||
172 | } | ||
173 | |||
174 | static int | ||
175 | nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) | ||
176 | { | ||
177 | struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; | ||
178 | struct nouveau_vm_pgd *vpgd; | ||
179 | struct nouveau_gpuobj *pgt; | ||
180 | u32 pgt_size; | ||
181 | int ret; | ||
182 | |||
183 | pgt_size = (1 << (vm->pgt_bits + 12)) >> type; | ||
184 | pgt_size *= 8; | ||
185 | |||
186 | mutex_unlock(&vm->mm->mutex); | ||
187 | ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, | ||
188 | NVOBJ_FLAG_ZERO_ALLOC, &pgt); | ||
189 | mutex_lock(&vm->mm->mutex); | ||
190 | if (unlikely(ret)) | ||
191 | return ret; | ||
192 | |||
193 | /* someone beat us to filling the PDE while we didn't have the lock */ | ||
194 | if (unlikely(vpgt->refcount++)) { | ||
195 | mutex_unlock(&vm->mm->mutex); | ||
196 | nouveau_gpuobj_ref(NULL, &pgt); | ||
197 | mutex_lock(&vm->mm->mutex); | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | list_for_each_entry(vpgd, &vm->pgd_list, head) { | ||
202 | vm->map_pgt(vpgd->obj, type, pde, pgt); | ||
203 | } | ||
204 | |||
205 | vpgt->page_shift = type; | ||
206 | vpgt->obj = pgt; | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | int | ||
211 | nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, | ||
212 | u32 access, struct nouveau_vma *vma) | ||
213 | { | ||
214 | u32 align = (1 << page_shift) >> 12; | ||
215 | u32 msize = size >> 12; | ||
216 | u32 fpde, lpde, pde; | ||
217 | int ret; | ||
218 | |||
219 | mutex_lock(&vm->mm->mutex); | ||
220 | ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node); | ||
221 | if (unlikely(ret != 0)) { | ||
222 | mutex_unlock(&vm->mm->mutex); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | fpde = (vma->node->offset >> vm->pgt_bits); | ||
227 | lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; | ||
228 | for (pde = fpde; pde <= lpde; pde++) { | ||
229 | struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; | ||
230 | |||
231 | if (likely(vpgt->refcount)) { | ||
232 | vpgt->refcount++; | ||
233 | continue; | ||
234 | } | ||
235 | |||
236 | ret = nouveau_vm_map_pgt(vm, pde, vma->node->type); | ||
237 | if (ret) { | ||
238 | if (pde != fpde) | ||
239 | nouveau_vm_unmap_pgt(vm, fpde, pde - 1); | ||
240 | nouveau_mm_put(vm->mm, vma->node); | ||
241 | mutex_unlock(&vm->mm->mutex); | ||
242 | vma->node = NULL; | ||
243 | return ret; | ||
244 | } | ||
245 | } | ||
246 | mutex_unlock(&vm->mm->mutex); | ||
247 | |||
248 | vma->vm = vm; | ||
249 | vma->offset = (u64)vma->node->offset << 12; | ||
250 | vma->access = access; | ||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | void | ||
255 | nouveau_vm_put(struct nouveau_vma *vma) | ||
256 | { | ||
257 | struct nouveau_vm *vm = vma->vm; | ||
258 | u32 fpde, lpde; | ||
259 | |||
260 | if (unlikely(vma->node == NULL)) | ||
261 | return; | ||
262 | fpde = (vma->node->offset >> vm->pgt_bits); | ||
263 | lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; | ||
264 | |||
265 | mutex_lock(&vm->mm->mutex); | ||
266 | nouveau_mm_put(vm->mm, vma->node); | ||
267 | vma->node = NULL; | ||
268 | nouveau_vm_unmap_pgt(vm, fpde, lpde); | ||
269 | mutex_unlock(&vm->mm->mutex); | ||
270 | } | ||
271 | |||
272 | int | ||
273 | nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, | ||
274 | u8 pgt_bits, u8 spg_shift, u8 lpg_shift, | ||
275 | struct nouveau_vm **pvm) | ||
276 | { | ||
277 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
278 | struct nouveau_vm *vm; | ||
279 | u64 mm_length = (offset + length) - mm_offset; | ||
280 | u32 block; | ||
281 | int ret; | ||
282 | |||
283 | vm = kzalloc(sizeof(*vm), GFP_KERNEL); | ||
284 | if (!vm) | ||
285 | return -ENOMEM; | ||
286 | |||
287 | if (dev_priv->card_type == NV_50) { | ||
288 | vm->map_pgt = nv50_vm_map_pgt; | ||
289 | vm->unmap_pgt = nv50_vm_unmap_pgt; | ||
290 | vm->map = nv50_vm_map; | ||
291 | vm->map_sg = nv50_vm_map_sg; | ||
292 | vm->unmap = nv50_vm_unmap; | ||
293 | vm->flush = nv50_vm_flush; | ||
294 | } else { | ||
295 | kfree(vm); | ||
296 | return -ENOSYS; | ||
297 | } | ||
298 | |||
299 | vm->fpde = offset >> pgt_bits; | ||
300 | vm->lpde = (offset + length - 1) >> pgt_bits; | ||
301 | vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL); | ||
302 | if (!vm->pgt) { | ||
303 | kfree(vm); | ||
304 | return -ENOMEM; | ||
305 | } | ||
306 | |||
307 | INIT_LIST_HEAD(&vm->pgd_list); | ||
308 | vm->dev = dev; | ||
309 | vm->refcount = 1; | ||
310 | vm->pgt_bits = pgt_bits - 12; | ||
311 | vm->spg_shift = spg_shift; | ||
312 | vm->lpg_shift = lpg_shift; | ||
313 | |||
314 | block = (1 << pgt_bits); | ||
315 | if (length < block) | ||
316 | block = length; | ||
317 | |||
318 | ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, | ||
319 | block >> 12); | ||
320 | if (ret) { | ||
321 | kfree(vm); | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | *pvm = vm; | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static int | ||
330 | nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) | ||
331 | { | ||
332 | struct nouveau_vm_pgd *vpgd; | ||
333 | int i; | ||
334 | |||
335 | if (!pgd) | ||
336 | return 0; | ||
337 | |||
338 | vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); | ||
339 | if (!vpgd) | ||
340 | return -ENOMEM; | ||
341 | |||
342 | nouveau_gpuobj_ref(pgd, &vpgd->obj); | ||
343 | |||
344 | mutex_lock(&vm->mm->mutex); | ||
345 | for (i = vm->fpde; i <= vm->lpde; i++) { | ||
346 | struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde]; | ||
347 | |||
348 | if (!vpgt->obj) { | ||
349 | vm->unmap_pgt(pgd, i); | ||
350 | continue; | ||
351 | } | ||
352 | |||
353 | vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj); | ||
354 | } | ||
355 | list_add(&vpgd->head, &vm->pgd_list); | ||
356 | mutex_unlock(&vm->mm->mutex); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | static void | ||
361 | nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) | ||
362 | { | ||
363 | struct nouveau_vm_pgd *vpgd, *tmp; | ||
364 | |||
365 | if (!pgd) | ||
366 | return; | ||
367 | |||
368 | mutex_lock(&vm->mm->mutex); | ||
369 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | ||
370 | if (vpgd->obj != pgd) | ||
371 | continue; | ||
372 | |||
373 | list_del(&vpgd->head); | ||
374 | nouveau_gpuobj_ref(NULL, &vpgd->obj); | ||
375 | kfree(vpgd); | ||
376 | } | ||
377 | mutex_unlock(&vm->mm->mutex); | ||
378 | } | ||
379 | |||
380 | static void | ||
381 | nouveau_vm_del(struct nouveau_vm *vm) | ||
382 | { | ||
383 | struct nouveau_vm_pgd *vpgd, *tmp; | ||
384 | |||
385 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | ||
386 | nouveau_vm_unlink(vm, vpgd->obj); | ||
387 | } | ||
388 | WARN_ON(nouveau_mm_fini(&vm->mm) != 0); | ||
389 | |||
390 | kfree(vm->pgt); | ||
391 | kfree(vm); | ||
392 | } | ||
393 | |||
394 | int | ||
395 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, | ||
396 | struct nouveau_gpuobj *pgd) | ||
397 | { | ||
398 | struct nouveau_vm *vm; | ||
399 | int ret; | ||
400 | |||
401 | vm = ref; | ||
402 | if (vm) { | ||
403 | ret = nouveau_vm_link(vm, pgd); | ||
404 | if (ret) | ||
405 | return ret; | ||
406 | |||
407 | vm->refcount++; | ||
408 | } | ||
409 | |||
410 | vm = *ptr; | ||
411 | *ptr = ref; | ||
412 | |||
413 | if (vm) { | ||
414 | nouveau_vm_unlink(vm, pgd); | ||
415 | |||
416 | if (--vm->refcount == 0) | ||
417 | nouveau_vm_del(vm); | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | } | ||
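Taken together, the new file gives a small address-space lifecycle: create a VM, link a channel's page directory into it, carve out a VMA, back it with VRAM, and tear everything down again. A hypothetical caller sketch; the sizes and shifts are illustrative, and NV_MEM_ACCESS_RW (seen in the state-init hunks earlier in this series) is assumed to be a valid access flag here:

	static int example_vm_lifecycle(struct drm_device *dev,
					struct nouveau_gpuobj *pgd,
					struct nouveau_vram *vram)
	{
		struct nouveau_vm *vm = NULL;
		struct nouveau_vm *chan_vm = NULL;
		struct nouveau_vma vma = {};
		int ret;

		/* 1GiB space, 29-bit PDE span, 4KiB small / 64KiB large pages */
		ret = nouveau_vm_new(dev, 0, 1ULL << 30, 0x1000, 29, 12, 16, &vm);
		if (ret)
			return ret;

		/* link the channel page directory, taking a reference */
		ret = nouveau_vm_ref(vm, &chan_vm, pgd);
		if (ret)
			goto out;

		/* 1MiB VMA of 4KiB pages, backed by the given VRAM node */
		ret = nouveau_vm_get(vm, 1 << 20, 12, NV_MEM_ACCESS_RW, &vma);
		if (ret)
			goto out_unlink;
		nouveau_vm_map(&vma, vram);

		/* ... use the mapping ... */

		nouveau_vm_unmap(&vma);
		nouveau_vm_put(&vma);
	out_unlink:
		nouveau_vm_ref(NULL, &chan_vm, pgd);	/* unlink pgd, drop ref */
	out:
		nouveau_vm_ref(NULL, &vm, NULL);	/* drop creation reference */
		return ret;
	}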
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h new file mode 100644 index 000000000000..b6755cfa7b71 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #ifndef __NOUVEAU_VM_H__ | ||
26 | #define __NOUVEAU_VM_H__ | ||
27 | |||
28 | #include "drmP.h" | ||
29 | |||
30 | #include "nouveau_drv.h" | ||
31 | #include "nouveau_mm.h" | ||
32 | |||
33 | struct nouveau_vm_pgt { | ||
34 | struct nouveau_gpuobj *obj; | ||
35 | u32 page_shift; | ||
36 | u32 refcount; | ||
37 | }; | ||
38 | |||
39 | struct nouveau_vm_pgd { | ||
40 | struct list_head head; | ||
41 | struct nouveau_gpuobj *obj; | ||
42 | }; | ||
43 | |||
44 | struct nouveau_vma { | ||
45 | struct nouveau_vm *vm; | ||
46 | struct nouveau_mm_node *node; | ||
47 | u64 offset; | ||
48 | u32 access; | ||
49 | }; | ||
50 | |||
51 | struct nouveau_vm { | ||
52 | struct drm_device *dev; | ||
53 | struct nouveau_mm *mm; | ||
54 | int refcount; | ||
55 | |||
56 | struct list_head pgd_list; | ||
57 | atomic_t pgraph_refs; | ||
58 | atomic_t pcrypt_refs; | ||
59 | |||
60 | struct nouveau_vm_pgt *pgt; | ||
61 | u32 fpde; | ||
62 | u32 lpde; | ||
63 | |||
64 | u32 pgt_bits; | ||
65 | u8 spg_shift; | ||
66 | u8 lpg_shift; | ||
67 | |||
68 | void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde, | ||
69 | struct nouveau_gpuobj *pgt); | ||
70 | void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde); | ||
71 | void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, | ||
72 | struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); | ||
73 | void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, | ||
74 | u32 pte, dma_addr_t *, u32 cnt); | ||
75 | void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); | ||
76 | void (*flush)(struct nouveau_vm *); | ||
77 | }; | ||
78 | |||
79 | /* nouveau_vm.c */ | ||
80 | int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, | ||
81 | u8 pgt_bits, u8 spg_shift, u8 lpg_shift, | ||
82 | struct nouveau_vm **); | ||
83 | int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, | ||
84 | struct nouveau_gpuobj *pgd); | ||
85 | int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, | ||
86 | u32 access, struct nouveau_vma *); | ||
87 | void nouveau_vm_put(struct nouveau_vma *); | ||
88 | void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *); | ||
89 | void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *); | ||
90 | void nouveau_vm_unmap(struct nouveau_vma *); | ||
91 | void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); | ||
92 | void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, | ||
93 | dma_addr_t *); | ||
94 | |||
95 | /* nv50_vm.c */ | ||
96 | void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, | ||
97 | struct nouveau_gpuobj *pgt); | ||
98 | void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde); | ||
99 | void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, | ||
100 | struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); | ||
101 | void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, | ||
102 | u32 pte, dma_addr_t *, u32 cnt); | ||
103 | void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); | ||
104 | void nv50_vm_flush(struct nouveau_vm *); | ||
105 | void nv50_vm_flush_engine(struct drm_device *, int engine); | ||
106 | |||
107 | #endif | ||
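To make the map-loop arithmetic in nouveau_vm.c concrete: nouveau_vm_new() stores pgt_bits minus 12, and vma->node->offset counts 4KiB units, so the indices fall out as below. The numbers assume fpde == 0 and an illustrative NV50-style pgt_bits of 29:

	/* Worked example of the pde/pte derivation in nouveau_vm_map_at(). */
	static void example_vm_indices(void)
	{
		const u32 pgt_bits = 29 - 12;	/* vm->pgt_bits after nouveau_vm_new(..., 29, ...) */
		const u32 offset = 0x25000;	/* VMA offset, in 4KiB units */
		const u32 bits = 12 - 12;	/* node->type - 12, i.e. 0 for 4KiB pages */

		u32 pde = offset >> pgt_bits;			/* 0x25000 >> 17 = 1 */
		u32 pte = (offset & ((1 << pgt_bits) - 1)) >> bits;	/* 0x5000 */
		u32 max = 1 << (pgt_bits - bits);		/* 0x20000 PTEs per table */

		(void)pde; (void)pte; (void)max;
	}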
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 40e180741629..297505eb98d5 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
551 | if (dev_priv->card_type >= NV_30) | 551 | if (dev_priv->card_type >= NV_30) |
552 | regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); | 552 | regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); |
553 | 553 | ||
554 | regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; | 554 | if (dev_priv->card_type >= NV_10) |
555 | regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; | ||
556 | else | ||
557 | regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; | ||
555 | 558 | ||
556 | /* Some misc regs */ | 559 | /* Some misc regs */ |
557 | if (dev_priv->card_type == NV_40) { | 560 | if (dev_priv->card_type == NV_40) { |
@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) | |||
669 | if (nv_two_heads(dev)) | 672 | if (nv_two_heads(dev)) |
670 | NVSetOwner(dev, nv_crtc->index); | 673 | NVSetOwner(dev, nv_crtc->index); |
671 | 674 | ||
675 | drm_vblank_pre_modeset(dev, nv_crtc->index); | ||
672 | funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | 676 | funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
673 | 677 | ||
674 | NVBlankScreen(dev, nv_crtc->index, true); | 678 | NVBlankScreen(dev, nv_crtc->index, true); |
@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) | |||
701 | #endif | 705 | #endif |
702 | 706 | ||
703 | funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 707 | funcs->dpms(crtc, DRM_MODE_DPMS_ON); |
708 | drm_vblank_post_modeset(dev, nv_crtc->index); | ||
704 | } | 709 | } |
705 | 710 | ||
706 | static void nv_crtc_destroy(struct drm_crtc *crtc) | 711 | static void nv_crtc_destroy(struct drm_crtc *crtc) |
@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = { | |||
986 | .cursor_move = nv04_crtc_cursor_move, | 991 | .cursor_move = nv04_crtc_cursor_move, |
987 | .gamma_set = nv_crtc_gamma_set, | 992 | .gamma_set = nv_crtc_gamma_set, |
988 | .set_config = drm_crtc_helper_set_config, | 993 | .set_config = drm_crtc_helper_set_config, |
994 | .page_flip = nouveau_crtc_page_flip, | ||
989 | .destroy = nv_crtc_destroy, | 995 | .destroy = nv_crtc_destroy, |
990 | }; | 996 | }; |
991 | 997 | ||
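On the drm_vblank_pre_modeset()/drm_vblank_post_modeset() pair added above: the DRM core uses it to keep the vblank counter consistent while the CRTC is switched off mid-modeset. The bracketing pattern, reduced to a minimal sketch (the body is a placeholder):

	/* Sketch of the bracketing added to nv_crtc_prepare()/nv_crtc_commit(). */
	static void example_modeset(struct drm_device *dev, int head)
	{
		drm_vblank_pre_modeset(dev, head);	/* counter handled while off */
		/* ... DPMS off, program timings, DPMS on ... */
		drm_vblank_post_modeset(dev, head);	/* counter resynced */
	}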
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index ba6423f2ffcc..e000455e06d0 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) | |||
74 | * use a 10ms timeout (guards against crtc being inactive, in | 74 | * use a 10ms timeout (guards against crtc being inactive, in |
75 | * which case blank state would never change) | 75 | * which case blank state would never change) |
76 | */ | 76 | */ |
77 | if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, | 77 | if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, |
78 | 0x00000001, 0x00000000)) | 78 | 0x00000001, 0x00000000)) |
79 | return -EBUSY; | 79 | return -EBUSY; |
80 | if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, | 80 | if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, |
81 | 0x00000001, 0x00000001)) | 81 | 0x00000001, 0x00000001)) |
82 | return -EBUSY; | 82 | return -EBUSY; |
83 | if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, | 83 | if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, |
84 | 0x00000001, 0x00000000)) | 84 | 0x00000001, 0x00000000)) |
85 | return -EBUSY; | 85 | return -EBUSY; |
86 | 86 | ||
87 | udelay(100); | 87 | udelay(100); |
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index 9e28cf772e3c..1715e1464b7d 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c | |||
@@ -32,6 +32,9 @@ | |||
32 | #include "nouveau_encoder.h" | 32 | #include "nouveau_encoder.h" |
33 | #include "nouveau_connector.h" | 33 | #include "nouveau_connector.h" |
34 | 34 | ||
35 | static void nv04_vblank_crtc0_isr(struct drm_device *); | ||
36 | static void nv04_vblank_crtc1_isr(struct drm_device *); | ||
37 | |||
35 | static void | 38 | static void |
36 | nv04_display_store_initial_head_owner(struct drm_device *dev) | 39 | nv04_display_store_initial_head_owner(struct drm_device *dev) |
37 | { | 40 | { |
@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev) | |||
197 | func->save(encoder); | 200 | func->save(encoder); |
198 | } | 201 | } |
199 | 202 | ||
203 | nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr); | ||
204 | nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr); | ||
200 | return 0; | 205 | return 0; |
201 | } | 206 | } |
202 | 207 | ||
@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev) | |||
208 | 213 | ||
209 | NV_DEBUG_KMS(dev, "\n"); | 214 | NV_DEBUG_KMS(dev, "\n"); |
210 | 215 | ||
216 | nouveau_irq_unregister(dev, 24); | ||
217 | nouveau_irq_unregister(dev, 25); | ||
218 | |||
211 | /* Turn every CRTC off. */ | 219 | /* Turn every CRTC off. */ |
212 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 220 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
213 | struct drm_mode_set modeset = { | 221 | struct drm_mode_set modeset = { |
@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev) | |||
258 | return 0; | 266 | return 0; |
259 | } | 267 | } |
260 | 268 | ||
269 | static void | ||
270 | nv04_vblank_crtc0_isr(struct drm_device *dev) | ||
271 | { | ||
272 | nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); | ||
273 | drm_handle_vblank(dev, 0); | ||
274 | } | ||
275 | |||
276 | static void | ||
277 | nv04_vblank_crtc1_isr(struct drm_device *dev) | ||
278 | { | ||
279 | nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); | ||
280 | drm_handle_vblank(dev, 1); | ||
281 | } | ||
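The nouveau_irq_register() calls above hook per-bit handlers into nouveau's shared interrupt dispatcher; the two CRTC ISRs ack the interrupt in hardware and forward it to DRM. A sketch of registering one more handler, using an illustrative status bit that is not from this patch:

	/* Hypothetical handler for an imaginary status bit 26. */
	static void example_isr(struct drm_device *dev)
	{
		/* ack the hardware status here, then notify whoever cares */
	}

	static void example_init(struct drm_device *dev)
	{
		nouveau_irq_register(dev, 26, example_isr);	/* bit 26 is made up */
	}

	static void example_fini(struct drm_device *dev)
	{
		nouveau_irq_unregister(dev, 26);
	}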
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 33e4c9388bc1..7a1189371096 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -28,52 +28,39 @@ | |||
28 | #include "nouveau_ramht.h" | 28 | #include "nouveau_ramht.h" |
29 | #include "nouveau_fbcon.h" | 29 | #include "nouveau_fbcon.h" |
30 | 30 | ||
31 | void | 31 | int |
32 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 32 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
33 | { | 33 | { |
34 | struct nouveau_fbdev *nfbdev = info->par; | 34 | struct nouveau_fbdev *nfbdev = info->par; |
35 | struct drm_device *dev = nfbdev->dev; | 35 | struct drm_device *dev = nfbdev->dev; |
36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 36 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
37 | struct nouveau_channel *chan = dev_priv->channel; | 37 | struct nouveau_channel *chan = dev_priv->channel; |
38 | int ret; | ||
38 | 39 | ||
39 | if (info->state != FBINFO_STATE_RUNNING) | 40 | ret = RING_SPACE(chan, 4); |
40 | return; | 41 | if (ret) |
41 | 42 | return ret; | |
42 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { | ||
43 | nouveau_fbcon_gpu_lockup(info); | ||
44 | } | ||
45 | |||
46 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | ||
47 | cfb_copyarea(info, region); | ||
48 | return; | ||
49 | } | ||
50 | 43 | ||
51 | BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); | 44 | BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); |
52 | OUT_RING(chan, (region->sy << 16) | region->sx); | 45 | OUT_RING(chan, (region->sy << 16) | region->sx); |
53 | OUT_RING(chan, (region->dy << 16) | region->dx); | 46 | OUT_RING(chan, (region->dy << 16) | region->dx); |
54 | OUT_RING(chan, (region->height << 16) | region->width); | 47 | OUT_RING(chan, (region->height << 16) | region->width); |
55 | FIRE_RING(chan); | 48 | FIRE_RING(chan); |
49 | return 0; | ||
56 | } | 50 | } |
57 | 51 | ||
58 | void | 52 | int |
59 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 53 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
60 | { | 54 | { |
61 | struct nouveau_fbdev *nfbdev = info->par; | 55 | struct nouveau_fbdev *nfbdev = info->par; |
62 | struct drm_device *dev = nfbdev->dev; | 56 | struct drm_device *dev = nfbdev->dev; |
63 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 57 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
64 | struct nouveau_channel *chan = dev_priv->channel; | 58 | struct nouveau_channel *chan = dev_priv->channel; |
59 | int ret; | ||
65 | 60 | ||
66 | if (info->state != FBINFO_STATE_RUNNING) | 61 | ret = RING_SPACE(chan, 7); |
67 | return; | 62 | if (ret) |
68 | 63 | return ret; | |
69 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { | ||
70 | nouveau_fbcon_gpu_lockup(info); | ||
71 | } | ||
72 | |||
73 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | ||
74 | cfb_fillrect(info, rect); | ||
75 | return; | ||
76 | } | ||
77 | 64 | ||
78 | BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); | 65 | BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); |
79 | OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); | 66 | OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); |
@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
87 | OUT_RING(chan, (rect->dx << 16) | rect->dy); | 74 | OUT_RING(chan, (rect->dx << 16) | rect->dy); |
88 | OUT_RING(chan, (rect->width << 16) | rect->height); | 75 | OUT_RING(chan, (rect->width << 16) | rect->height); |
89 | FIRE_RING(chan); | 76 | FIRE_RING(chan); |
77 | return 0; | ||
90 | } | 78 | } |
91 | 79 | ||
92 | void | 80 | int |
93 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 81 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
94 | { | 82 | { |
95 | struct nouveau_fbdev *nfbdev = info->par; | 83 | struct nouveau_fbdev *nfbdev = info->par; |
@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
101 | uint32_t dsize; | 89 | uint32_t dsize; |
102 | uint32_t width; | 90 | uint32_t width; |
103 | uint32_t *data = (uint32_t *)image->data; | 91 | uint32_t *data = (uint32_t *)image->data; |
92 | int ret; | ||
104 | 93 | ||
105 | if (info->state != FBINFO_STATE_RUNNING) | 94 | if (image->depth != 1) |
106 | return; | 95 | return -ENODEV; |
107 | |||
108 | if (image->depth != 1) { | ||
109 | cfb_imageblit(info, image); | ||
110 | return; | ||
111 | } | ||
112 | |||
113 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { | ||
114 | nouveau_fbcon_gpu_lockup(info); | ||
115 | } | ||
116 | 96 | ||
117 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 97 | ret = RING_SPACE(chan, 8); |
118 | cfb_imageblit(info, image); | 98 | if (ret) |
119 | return; | 99 | return ret; |
120 | } | ||
121 | 100 | ||
122 | width = ALIGN(image->width, 8); | 101 | width = ALIGN(image->width, 8); |
123 | dsize = ALIGN(width * image->height, 32) >> 5; | 102 | dsize = ALIGN(width * image->height, 32) >> 5; |
@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
144 | while (dsize) { | 123 | while (dsize) { |
145 | int iter_len = dsize > 128 ? 128 : dsize; | 124 | int iter_len = dsize > 128 ? 128 : dsize; |
146 | 125 | ||
147 | if (RING_SPACE(chan, iter_len + 1)) { | 126 | ret = RING_SPACE(chan, iter_len + 1); |
148 | nouveau_fbcon_gpu_lockup(info); | 127 | if (ret) |
149 | cfb_imageblit(info, image); | 128 | return ret; |
150 | return; | ||
151 | } | ||
152 | 129 | ||
153 | BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); | 130 | BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); |
154 | OUT_RINGp(chan, data, iter_len); | 131 | OUT_RINGp(chan, data, iter_len); |
@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
157 | } | 134 | } |
158 | 135 | ||
159 | FIRE_RING(chan); | 136 | FIRE_RING(chan); |
160 | } | 137 | return 0; |
161 | |||
162 | static int | ||
163 | nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) | ||
164 | { | ||
165 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
166 | struct nouveau_gpuobj *obj = NULL; | ||
167 | int ret; | ||
168 | |||
169 | ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj); | ||
170 | if (ret) | ||
171 | return ret; | ||
172 | |||
173 | ret = nouveau_ramht_insert(dev_priv->channel, handle, obj); | ||
174 | nouveau_gpuobj_ref(NULL, &obj); | ||
175 | return ret; | ||
176 | } | 138 | } |
177 | 139 | ||
178 | int | 140 | int |
@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
214 | return -EINVAL; | 176 | return -EINVAL; |
215 | } | 177 | } |
216 | 178 | ||
217 | ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? | 179 | ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D, |
218 | 0x0062 : 0x0042, NvCtxSurf2D); | 180 | dev_priv->card_type >= NV_10 ? |
181 | 0x0062 : 0x0042); | ||
219 | if (ret) | 182 | if (ret) |
220 | return ret; | 183 | return ret; |
221 | 184 | ||
222 | ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect); | 185 | ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019); |
223 | if (ret) | 186 | if (ret) |
224 | return ret; | 187 | return ret; |
225 | 188 | ||
226 | ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop); | 189 | ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043); |
227 | if (ret) | 190 | if (ret) |
228 | return ret; | 191 | return ret; |
229 | 192 | ||
230 | ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt); | 193 | ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044); |
231 | if (ret) | 194 | if (ret) |
232 | return ret; | 195 | return ret; |
233 | 196 | ||
234 | ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect); | 197 | ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a); |
235 | if (ret) | 198 | if (ret) |
236 | return ret; | 199 | return ret; |
237 | 200 | ||
238 | ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? | 201 | ret = nouveau_gpuobj_gr_new(chan, NvImageBlit, |
239 | 0x009f : 0x005f, NvImageBlit); | 202 | dev_priv->chipset >= 0x11 ? |
203 | 0x009f : 0x005f); | ||
240 | if (ret) | 204 | if (ret) |
241 | return ret; | 205 | return ret; |
242 | 206 | ||
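With the accel hooks now returning int instead of falling back themselves, the software fallback presumably moves into a shared fbcon wrapper. A sketch of what such a caller could look like; this is an assumption about the caller, not code from this patch:

	/* Hypothetical wrapper: try the accelerated path, fall back to cfb_*. */
	static void example_fillrect(struct fb_info *info,
				     const struct fb_fillrect *rect)
	{
		int ret = -ENODEV;

		if (info->state == FBINFO_STATE_RUNNING &&
		    !(info->flags & FBINFO_HWACCEL_DISABLED))
			ret = nv04_fbcon_fillrect(info, rect);

		if (ret)
			cfb_fillrect(info, rect);	/* software rendering */
	}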
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 708293b7ddcd..f89d104698df 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_ramht.h" | 30 | #include "nouveau_ramht.h" |
31 | #include "nouveau_util.h" | ||
31 | 32 | ||
32 | #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) | 33 | #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) |
33 | #define NV04_RAMFC__SIZE 32 | 34 | #define NV04_RAMFC__SIZE 32 |
@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan) | |||
128 | if (ret) | 129 | if (ret) |
129 | return ret; | 130 | return ret; |
130 | 131 | ||
132 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
133 | NV03_USER(chan->id), PAGE_SIZE); | ||
134 | if (!chan->user) | ||
135 | return -ENOMEM; | ||
136 | |||
131 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 137 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
132 | 138 | ||
133 | /* Setup initial state */ | 139 | /* Setup initial state */ |
@@ -151,10 +157,31 @@ void | |||
151 | nv04_fifo_destroy_context(struct nouveau_channel *chan) | 157 | nv04_fifo_destroy_context(struct nouveau_channel *chan) |
152 | { | 158 | { |
153 | struct drm_device *dev = chan->dev; | 159 | struct drm_device *dev = chan->dev; |
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
161 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
162 | unsigned long flags; | ||
154 | 163 | ||
155 | nv_wr32(dev, NV04_PFIFO_MODE, | 164 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
156 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); | 165 | pfifo->reassign(dev, false); |
157 | 166 | ||
167 | /* Unload the context if it's the currently active one */ | ||
168 | if (pfifo->channel_id(dev) == chan->id) { | ||
169 | pfifo->disable(dev); | ||
170 | pfifo->unload_context(dev); | ||
171 | pfifo->enable(dev); | ||
172 | } | ||
173 | |||
174 | /* Keep it from being rescheduled */ | ||
175 | nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); | ||
176 | |||
177 | pfifo->reassign(dev, true); | ||
178 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
179 | |||
180 | /* Free the channel resources */ | ||
181 | if (chan->user) { | ||
182 | iounmap(chan->user); | ||
183 | chan->user = NULL; | ||
184 | } | ||
158 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | 185 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
159 | } | 186 | } |
160 | 187 | ||
@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev) | |||
208 | if (chid < 0 || chid >= dev_priv->engine.fifo.channels) | 235 | if (chid < 0 || chid >= dev_priv->engine.fifo.channels) |
209 | return 0; | 236 | return 0; |
210 | 237 | ||
211 | chan = dev_priv->fifos[chid]; | 238 | chan = dev_priv->channels.ptr[chid]; |
212 | if (!chan) { | 239 | if (!chan) { |
213 | NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); | 240 | NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); |
214 | return -EINVAL; | 241 | return -EINVAL; |
@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev) | |||
267 | static void | 294 | static void |
268 | nv04_fifo_init_intr(struct drm_device *dev) | 295 | nv04_fifo_init_intr(struct drm_device *dev) |
269 | { | 296 | { |
297 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
270 | nv_wr32(dev, 0x002100, 0xffffffff); | 298 | nv_wr32(dev, 0x002100, 0xffffffff); |
271 | nv_wr32(dev, 0x002140, 0xffffffff); | 299 | nv_wr32(dev, 0x002140, 0xffffffff); |
272 | } | 300 | } |
@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev) | |||
289 | pfifo->reassign(dev, true); | 317 | pfifo->reassign(dev, true); |
290 | 318 | ||
291 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 319 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
292 | if (dev_priv->fifos[i]) { | 320 | if (dev_priv->channels.ptr[i]) { |
293 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); | 321 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); |
294 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); | 322 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); |
295 | } | 323 | } |
@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev) | |||
298 | return 0; | 326 | return 0; |
299 | } | 327 | } |
300 | 328 | ||
329 | void | ||
330 | nv04_fifo_fini(struct drm_device *dev) | ||
331 | { | ||
332 | nv_wr32(dev, 0x2140, 0x00000000); | ||
333 | nouveau_irq_unregister(dev, 8); | ||
334 | } | ||
335 | |||
336 | static bool | ||
337 | nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) | ||
338 | { | ||
339 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
340 | struct nouveau_channel *chan = NULL; | ||
341 | struct nouveau_gpuobj *obj; | ||
342 | unsigned long flags; | ||
343 | const int subc = (addr >> 13) & 0x7; | ||
344 | const int mthd = addr & 0x1ffc; | ||
345 | bool handled = false; | ||
346 | u32 engine; | ||
347 | |||
348 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
349 | if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) | ||
350 | chan = dev_priv->channels.ptr[chid]; | ||
351 | if (unlikely(!chan)) | ||
352 | goto out; | ||
353 | |||
354 | switch (mthd) { | ||
355 | case 0x0000: /* bind object to subchannel */ | ||
356 | obj = nouveau_ramht_find(chan, data); | ||
357 | if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) | ||
358 | break; | ||
359 | |||
360 | chan->sw_subchannel[subc] = obj->class; | ||
361 | engine = 0x0000000f << (subc * 4); | ||
362 | |||
363 | nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); | ||
364 | handled = true; | ||
365 | break; | ||
366 | default: | ||
367 | engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); | ||
368 | if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) | ||
369 | break; | ||
370 | |||
371 | if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], | ||
372 | mthd, data)) | ||
373 | handled = true; | ||
374 | break; | ||
375 | } | ||
376 | |||
377 | out: | ||
378 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
379 | return handled; | ||
380 | } | ||
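
nouveau_fifo_swmthd() splits the trapped FIFO address into a subchannel (bits 15:13) and a word-aligned method offset (bits 12:2); method 0x0000 is the special "bind object to subchannel" case. A worked decode with an illustrative, hypothetical address:

    const u32 addr = 0x0000615c;          /* example trapped address */
    const int subc = (addr >> 13) & 0x7;  /* == 3, subchannel */
    const int mthd = addr & 0x1ffc;       /* == 0x015c, method offset */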
381 | |||
382 | void | ||
383 | nv04_fifo_isr(struct drm_device *dev) | ||
384 | { | ||
385 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
386 | struct nouveau_engine *engine = &dev_priv->engine; | ||
387 | uint32_t status, reassign; | ||
388 | int cnt = 0; | ||
389 | |||
390 | reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; | ||
391 | while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { | ||
392 | uint32_t chid, get; | ||
393 | |||
394 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | ||
395 | |||
396 | chid = engine->fifo.channel_id(dev); | ||
397 | get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); | ||
398 | |||
399 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { | ||
400 | uint32_t mthd, data; | ||
401 | int ptr; | ||
402 | |||
403 | /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before | ||
404 | * wrapping on my G80 chips, but CACHE1 isn't big | ||
405 | * enough for this much data. Tests show that it | ||
406 | * wraps around to the start at GET=0x800; no clue | ||
407 | * as to why. | ||
408 | */ | ||
409 | ptr = (get & 0x7ff) >> 2; | ||
410 | |||
411 | if (dev_priv->card_type < NV_40) { | ||
412 | mthd = nv_rd32(dev, | ||
413 | NV04_PFIFO_CACHE1_METHOD(ptr)); | ||
414 | data = nv_rd32(dev, | ||
415 | NV04_PFIFO_CACHE1_DATA(ptr)); | ||
416 | } else { | ||
417 | mthd = nv_rd32(dev, | ||
418 | NV40_PFIFO_CACHE1_METHOD(ptr)); | ||
419 | data = nv_rd32(dev, | ||
420 | NV40_PFIFO_CACHE1_DATA(ptr)); | ||
421 | } | ||
422 | |||
423 | if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) { | ||
424 | NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " | ||
425 | "Mthd 0x%04x Data 0x%08x\n", | ||
426 | chid, (mthd >> 13) & 7, mthd & 0x1ffc, | ||
427 | data); | ||
428 | } | ||
429 | |||
430 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); | ||
431 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
432 | NV_PFIFO_INTR_CACHE_ERROR); | ||
433 | |||
434 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, | ||
435 | nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); | ||
436 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
437 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, | ||
438 | nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); | ||
439 | nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); | ||
440 | |||
441 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, | ||
442 | nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); | ||
443 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
444 | |||
445 | status &= ~NV_PFIFO_INTR_CACHE_ERROR; | ||
446 | } | ||
447 | |||
448 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { | ||
449 | u32 dma_get = nv_rd32(dev, 0x003244); | ||
450 | u32 dma_put = nv_rd32(dev, 0x003240); | ||
451 | u32 push = nv_rd32(dev, 0x003220); | ||
452 | u32 state = nv_rd32(dev, 0x003228); | ||
453 | |||
454 | if (dev_priv->card_type == NV_50) { | ||
455 | u32 ho_get = nv_rd32(dev, 0x003328); | ||
456 | u32 ho_put = nv_rd32(dev, 0x003320); | ||
457 | u32 ib_get = nv_rd32(dev, 0x003334); | ||
458 | u32 ib_put = nv_rd32(dev, 0x003330); | ||
459 | |||
460 | if (nouveau_ratelimit()) | ||
461 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " | ||
462 | "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " | ||
463 | "State 0x%08x Push 0x%08x\n", | ||
464 | chid, ho_get, dma_get, ho_put, | ||
465 | dma_put, ib_get, ib_put, state, | ||
466 | push); | ||
467 | |||
468 | /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ | ||
469 | nv_wr32(dev, 0x003364, 0x00000000); | ||
470 | if (dma_get != dma_put || ho_get != ho_put) { | ||
471 | nv_wr32(dev, 0x003244, dma_put); | ||
472 | nv_wr32(dev, 0x003328, ho_put); | ||
473 | } else if (ib_get != ib_put) { | ||
475 | nv_wr32(dev, 0x003334, ib_put); | ||
476 | } | ||
477 | } else { | ||
478 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " | ||
479 | "Put 0x%08x State 0x%08x Push 0x%08x\n", | ||
480 | chid, dma_get, dma_put, state, push); | ||
481 | |||
482 | if (dma_get != dma_put) | ||
483 | nv_wr32(dev, 0x003244, dma_put); | ||
484 | } | ||
485 | |||
486 | nv_wr32(dev, 0x003228, 0x00000000); | ||
487 | nv_wr32(dev, 0x003220, 0x00000001); | ||
488 | nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); | ||
489 | status &= ~NV_PFIFO_INTR_DMA_PUSHER; | ||
490 | } | ||
491 | |||
492 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | ||
493 | uint32_t sem; | ||
494 | |||
495 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | ||
496 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
497 | NV_PFIFO_INTR_SEMAPHORE); | ||
498 | |||
499 | sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); | ||
500 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | ||
501 | |||
502 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
503 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
504 | } | ||
505 | |||
506 | if (dev_priv->card_type == NV_50) { | ||
507 | if (status & 0x00000010) { | ||
508 | nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); | ||
509 | status &= ~0x00000010; | ||
510 | nv_wr32(dev, 0x002100, 0x00000010); | ||
511 | } | ||
512 | } | ||
513 | |||
514 | if (status) { | ||
515 | if (nouveau_ratelimit()) | ||
516 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", | ||
517 | status, chid); | ||
518 | nv_wr32(dev, NV03_PFIFO_INTR_0, status); | ||
519 | status = 0; | ||
520 | } | ||
521 | |||
522 | nv_wr32(dev, NV03_PFIFO_CACHES, reassign); | ||
523 | } | ||
524 | |||
525 | if (status) { | ||
526 | NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); | ||
527 | nv_wr32(dev, 0x2140, 0); | ||
528 | nv_wr32(dev, 0x140, 0); | ||
529 | } | ||
530 | |||
531 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); | ||
532 | } | ||
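
When NV_PFIFO_INTR_DMA_PUSHER fires, the handler recovers by forcing the fetch pointers forward to PUT, effectively discarding the command that wedged the pusher. The NV50 branch restated as a sketch (the helper name and argument passing are assumptions for illustration, not in-tree code):

    static void nv50_pusher_skip(struct drm_device *dev,
                                 u32 dma_get, u32 dma_put, u32 ho_get,
                                 u32 ho_put, u32 ib_get, u32 ib_put)
    {
            nv_wr32(dev, 0x003364, 0x00000000);       /* reset METHOD_COUNT */
            if (dma_get != dma_put || ho_get != ho_put) {
                    nv_wr32(dev, 0x003244, dma_put);  /* DMA_GET = DMA_PUT */
                    nv_wr32(dev, 0x003328, ho_put);   /* GET_HIGH = PUT_HIGH */
            } else if (ib_get != ib_put) {
                    nv_wr32(dev, 0x003334, ib_put);   /* IB_GET = IB_PUT */
            }
    }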
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index c8973421b635..af75015068d6 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
@@ -26,6 +26,11 @@ | |||
26 | #include "drm.h" | 26 | #include "drm.h" |
27 | #include "nouveau_drm.h" | 27 | #include "nouveau_drm.h" |
28 | #include "nouveau_drv.h" | 28 | #include "nouveau_drv.h" |
29 | #include "nouveau_hw.h" | ||
30 | #include "nouveau_util.h" | ||
31 | |||
32 | static int nv04_graph_register(struct drm_device *dev); | ||
33 | static void nv04_graph_isr(struct drm_device *dev); | ||
29 | 34 | ||
30 | static uint32_t nv04_graph_ctx_regs[] = { | 35 | static uint32_t nv04_graph_ctx_regs[] = { |
31 | 0x0040053c, | 36 | 0x0040053c, |
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev) | |||
357 | if (chid >= dev_priv->engine.fifo.channels) | 362 | if (chid >= dev_priv->engine.fifo.channels) |
358 | return NULL; | 363 | return NULL; |
359 | 364 | ||
360 | return dev_priv->fifos[chid]; | 365 | return dev_priv->channels.ptr[chid]; |
361 | } | 366 | } |
362 | 367 | ||
363 | void | 368 | static void |
364 | nv04_graph_context_switch(struct drm_device *dev) | 369 | nv04_graph_context_switch(struct drm_device *dev) |
365 | { | 370 | { |
366 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 371 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev) | |||
368 | struct nouveau_channel *chan = NULL; | 373 | struct nouveau_channel *chan = NULL; |
369 | int chid; | 374 | int chid; |
370 | 375 | ||
371 | pgraph->fifo_access(dev, false); | ||
372 | nouveau_wait_for_idle(dev); | 376 | nouveau_wait_for_idle(dev); |
373 | 377 | ||
374 | /* If previous context is valid, we need to save it */ | 378 | /* If previous context is valid, we need to save it */ |
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev) | |||
376 | 380 | ||
377 | /* Load context for next channel */ | 381 | /* Load context for next channel */ |
378 | chid = dev_priv->engine.fifo.channel_id(dev); | 382 | chid = dev_priv->engine.fifo.channel_id(dev); |
379 | chan = dev_priv->fifos[chid]; | 383 | chan = dev_priv->channels.ptr[chid]; |
380 | if (chan) | 384 | if (chan) |
381 | nv04_graph_load_context(chan); | 385 | nv04_graph_load_context(chan); |
382 | |||
383 | pgraph->fifo_access(dev, true); | ||
384 | } | 386 | } |
385 | 387 | ||
386 | static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) | 388 | static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) |
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan) | |||
412 | 414 | ||
413 | void nv04_graph_destroy_context(struct nouveau_channel *chan) | 415 | void nv04_graph_destroy_context(struct nouveau_channel *chan) |
414 | { | 416 | { |
417 | struct drm_device *dev = chan->dev; | ||
418 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
419 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
415 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; | 420 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; |
421 | unsigned long flags; | ||
422 | |||
423 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
424 | pgraph->fifo_access(dev, false); | ||
425 | |||
426 | /* Unload the context if it's the currently active one */ | ||
427 | if (pgraph->channel(dev) == chan) | ||
428 | pgraph->unload_context(dev); | ||
416 | 429 | ||
430 | /* Free the context resources */ | ||
417 | kfree(pgraph_ctx); | 431 | kfree(pgraph_ctx); |
418 | chan->pgraph_ctx = NULL; | 432 | chan->pgraph_ctx = NULL; |
433 | |||
434 | pgraph->fifo_access(dev, true); | ||
435 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
419 | } | 436 | } |
420 | 437 | ||
421 | int nv04_graph_load_context(struct nouveau_channel *chan) | 438 | int nv04_graph_load_context(struct nouveau_channel *chan) |
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev) | |||
468 | { | 485 | { |
469 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 486 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
470 | uint32_t tmp; | 487 | uint32_t tmp; |
488 | int ret; | ||
471 | 489 | ||
472 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & | 490 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & |
473 | ~NV_PMC_ENABLE_PGRAPH); | 491 | ~NV_PMC_ENABLE_PGRAPH); |
474 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | | 492 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | |
475 | NV_PMC_ENABLE_PGRAPH); | 493 | NV_PMC_ENABLE_PGRAPH); |
476 | 494 | ||
495 | ret = nv04_graph_register(dev); | ||
496 | if (ret) | ||
497 | return ret; | ||
498 | |||
477 | /* Enable PGRAPH interrupts */ | 499 | /* Enable PGRAPH interrupts */ |
500 | nouveau_irq_register(dev, 12, nv04_graph_isr); | ||
478 | nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); | 501 | nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); |
479 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); | 502 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); |
480 | 503 | ||
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev) | |||
510 | 533 | ||
511 | void nv04_graph_takedown(struct drm_device *dev) | 534 | void nv04_graph_takedown(struct drm_device *dev) |
512 | { | 535 | { |
536 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); | ||
537 | nouveau_irq_unregister(dev, 12); | ||
513 | } | 538 | } |
514 | 539 | ||
515 | void | 540 | void |
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled) | |||
524 | } | 549 | } |
525 | 550 | ||
526 | static int | 551 | static int |
527 | nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, | 552 | nv04_graph_mthd_set_ref(struct nouveau_channel *chan, |
528 | int mthd, uint32_t data) | 553 | u32 class, u32 mthd, u32 data) |
529 | { | 554 | { |
530 | atomic_set(&chan->fence.last_sequence_irq, data); | 555 | atomic_set(&chan->fence.last_sequence_irq, data); |
531 | return 0; | 556 | return 0; |
532 | } | 557 | } |
533 | 558 | ||
559 | int | ||
560 | nv04_graph_mthd_page_flip(struct nouveau_channel *chan, | ||
561 | u32 class, u32 mthd, u32 data) | ||
562 | { | ||
563 | struct drm_device *dev = chan->dev; | ||
564 | struct nouveau_page_flip_state s; | ||
565 | |||
566 | if (!nouveau_finish_page_flip(chan, &s)) | ||
567 | nv_set_crtc_base(dev, s.crtc, | ||
568 | s.offset + s.y * s.pitch + s.x * s.bpp / 8); | ||
569 | |||
570 | return 0; | ||
571 | } | ||
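
On flip completion the CRTC scanout base is repointed at the byte address of pixel (x, y) in the new buffer, i.e. offset + y * pitch + x * bpp / 8. With illustrative values:

    u32 offset = 0x100000, pitch = 5120, bpp = 32, x = 8, y = 2;
    u32 base = offset + y * pitch + x * bpp / 8;  /* 0x100000 + 10240 + 32 = 0x102820 */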
572 | |||
534 | /* | 573 | /* |
535 | * Software methods, why they are needed, and how they all work: | 574 | * Software methods, why they are needed, and how they all work: |
536 | * | 575 | * |
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, | |||
606 | */ | 645 | */ |
607 | 646 | ||
608 | static void | 647 | static void |
609 | nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) | 648 | nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value) |
610 | { | 649 | { |
611 | struct drm_device *dev = chan->dev; | 650 | struct drm_device *dev = chan->dev; |
612 | uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; | 651 | u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; |
613 | int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; | 652 | int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; |
614 | uint32_t tmp; | 653 | u32 tmp; |
615 | 654 | ||
616 | tmp = nv_ri32(dev, instance); | 655 | tmp = nv_ri32(dev, instance); |
617 | tmp &= ~mask; | 656 | tmp &= ~mask; |
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) | |||
623 | } | 662 | } |
624 | 663 | ||
625 | static void | 664 | static void |
626 | nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) | 665 | nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value) |
627 | { | 666 | { |
628 | struct drm_device *dev = chan->dev; | 667 | struct drm_device *dev = chan->dev; |
629 | uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; | 668 | u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; |
630 | uint32_t tmp, ctx1; | 669 | u32 tmp, ctx1; |
631 | int class, op, valid = 1; | 670 | int class, op, valid = 1; |
632 | 671 | ||
633 | ctx1 = nv_ri32(dev, instance); | 672 | ctx1 = nv_ri32(dev, instance); |
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val | |||
672 | } | 711 | } |
673 | 712 | ||
674 | static int | 713 | static int |
675 | nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | 714 | nv04_graph_mthd_set_operation(struct nouveau_channel *chan, |
676 | int mthd, uint32_t data) | 715 | u32 class, u32 mthd, u32 data) |
677 | { | 716 | { |
678 | if (data > 5) | 717 | if (data > 5) |
679 | return 1; | 718 | return 1; |
680 | /* Old versions of the objects only accept first three operations. */ | 719 | /* Old versions of the objects only accept first three operations. */ |
681 | if (data > 2 && grclass < 0x40) | 720 | if (data > 2 && class < 0x40) |
682 | return 1; | 721 | return 1; |
683 | nv04_graph_set_ctx1(chan, 0x00038000, data << 15); | 722 | nv04_graph_set_ctx1(chan, 0x00038000, data << 15); |
684 | /* changing operation changes set of objects needed for validation */ | 723 | /* changing operation changes set of objects needed for validation */ |
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | |||
687 | } | 726 | } |
688 | 727 | ||
689 | static int | 728 | static int |
690 | nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, | 729 | nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, |
691 | int mthd, uint32_t data) | 730 | u32 class, u32 mthd, u32 data) |
692 | { | 731 | { |
693 | uint32_t min = data & 0xffff, max; | 732 | uint32_t min = data & 0xffff, max; |
694 | uint32_t w = data >> 16; | 733 | uint32_t w = data >> 16; |
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, | |||
706 | } | 745 | } |
707 | 746 | ||
708 | static int | 747 | static int |
709 | nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, | 748 | nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, |
710 | int mthd, uint32_t data) | 749 | u32 class, u32 mthd, u32 data) |
711 | { | 750 | { |
712 | uint32_t min = data & 0xffff, max; | 751 | uint32_t min = data & 0xffff, max; |
713 | uint32_t w = data >> 16; | 752 | uint32_t w = data >> 16; |
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, | |||
725 | } | 764 | } |
726 | 765 | ||
727 | static int | 766 | static int |
728 | nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, | 767 | nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, |
729 | int mthd, uint32_t data) | 768 | u32 class, u32 mthd, u32 data) |
730 | { | 769 | { |
731 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 770 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
732 | case 0x30: | 771 | case 0x30: |
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, | |||
742 | } | 781 | } |
743 | 782 | ||
744 | static int | 783 | static int |
745 | nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, | 784 | nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, |
746 | int mthd, uint32_t data) | 785 | u32 class, u32 mthd, u32 data) |
747 | { | 786 | { |
748 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 787 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
749 | case 0x30: | 788 | case 0x30: |
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, | |||
763 | } | 802 | } |
764 | 803 | ||
765 | static int | 804 | static int |
766 | nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, | 805 | nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, |
767 | int mthd, uint32_t data) | 806 | u32 class, u32 mthd, u32 data) |
768 | { | 807 | { |
769 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 808 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
770 | case 0x30: | 809 | case 0x30: |
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, | |||
778 | } | 817 | } |
779 | 818 | ||
780 | static int | 819 | static int |
781 | nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, | 820 | nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, |
782 | int mthd, uint32_t data) | 821 | u32 class, u32 mthd, u32 data) |
783 | { | 822 | { |
784 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 823 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
785 | case 0x30: | 824 | case 0x30: |
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, | |||
793 | } | 832 | } |
794 | 833 | ||
795 | static int | 834 | static int |
796 | nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, | 835 | nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, |
797 | int mthd, uint32_t data) | 836 | u32 class, u32 mthd, u32 data) |
798 | { | 837 | { |
799 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 838 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
800 | case 0x30: | 839 | case 0x30: |
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, | |||
808 | } | 847 | } |
809 | 848 | ||
810 | static int | 849 | static int |
811 | nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, | 850 | nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, |
812 | int mthd, uint32_t data) | 851 | u32 class, u32 mthd, u32 data) |
813 | { | 852 | { |
814 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 853 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
815 | case 0x30: | 854 | case 0x30: |
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, | |||
823 | } | 862 | } |
824 | 863 | ||
825 | static int | 864 | static int |
826 | nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, | 865 | nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, |
827 | int mthd, uint32_t data) | 866 | u32 class, u32 mthd, u32 data) |
828 | { | 867 | { |
829 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 868 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
830 | case 0x30: | 869 | case 0x30: |
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, | |||
838 | } | 877 | } |
839 | 878 | ||
840 | static int | 879 | static int |
841 | nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, | 880 | nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, |
842 | int mthd, uint32_t data) | 881 | u32 class, u32 mthd, u32 data) |
843 | { | 882 | { |
844 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 883 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
845 | case 0x30: | 884 | case 0x30: |
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, | |||
853 | } | 892 | } |
854 | 893 | ||
855 | static int | 894 | static int |
856 | nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, | 895 | nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, |
857 | int mthd, uint32_t data) | 896 | u32 class, u32 mthd, u32 data) |
858 | { | 897 | { |
859 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 898 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
860 | case 0x30: | 899 | case 0x30: |
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, | |||
868 | } | 907 | } |
869 | 908 | ||
870 | static int | 909 | static int |
871 | nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, | 910 | nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, |
872 | int mthd, uint32_t data) | 911 | u32 class, u32 mthd, u32 data) |
873 | { | 912 | { |
874 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 913 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
875 | case 0x30: | 914 | case 0x30: |
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, | |||
883 | } | 922 | } |
884 | 923 | ||
885 | static int | 924 | static int |
886 | nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, | 925 | nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, |
887 | int mthd, uint32_t data) | 926 | u32 class, u32 mthd, u32 data) |
888 | { | 927 | { |
889 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 928 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
890 | case 0x30: | 929 | case 0x30: |
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, | |||
898 | } | 937 | } |
899 | 938 | ||
900 | static int | 939 | static int |
901 | nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, | 940 | nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, |
902 | int mthd, uint32_t data) | 941 | u32 class, u32 mthd, u32 data) |
903 | { | 942 | { |
904 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 943 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
905 | case 0x30: | 944 | case 0x30: |
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, | |||
913 | } | 952 | } |
914 | 953 | ||
915 | static int | 954 | static int |
916 | nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, | 955 | nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, |
917 | int mthd, uint32_t data) | 956 | u32 class, u32 mthd, u32 data) |
918 | { | 957 | { |
919 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | 958 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { |
920 | case 0x30: | 959 | case 0x30: |
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, | |||
930 | return 1; | 969 | return 1; |
931 | } | 970 | } |
932 | 971 | ||
933 | static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { | 972 | static int |
934 | { 0x0150, nv04_graph_mthd_set_ref }, | 973 | nv04_graph_register(struct drm_device *dev) |
935 | {} | 974 | { |
936 | }; | 975 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
937 | |||
938 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { | ||
939 | { 0x0184, nv04_graph_mthd_bind_nv01_patt }, | ||
940 | { 0x0188, nv04_graph_mthd_bind_rop }, | ||
941 | { 0x018c, nv04_graph_mthd_bind_beta1 }, | ||
942 | { 0x0190, nv04_graph_mthd_bind_surf_dst }, | ||
943 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
944 | {}, | ||
945 | }; | ||
946 | |||
947 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { | ||
948 | { 0x0188, nv04_graph_mthd_bind_nv04_patt }, | ||
949 | { 0x018c, nv04_graph_mthd_bind_rop }, | ||
950 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | ||
951 | { 0x0194, nv04_graph_mthd_bind_beta4 }, | ||
952 | { 0x0198, nv04_graph_mthd_bind_surf2d }, | ||
953 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
954 | {}, | ||
955 | }; | ||
956 | |||
957 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { | ||
958 | { 0x0184, nv04_graph_mthd_bind_chroma }, | ||
959 | { 0x0188, nv04_graph_mthd_bind_clip }, | ||
960 | { 0x018c, nv04_graph_mthd_bind_nv01_patt }, | ||
961 | { 0x0190, nv04_graph_mthd_bind_rop }, | ||
962 | { 0x0194, nv04_graph_mthd_bind_beta1 }, | ||
963 | { 0x0198, nv04_graph_mthd_bind_surf_dst }, | ||
964 | { 0x019c, nv04_graph_mthd_bind_surf_src }, | ||
965 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
966 | {}, | ||
967 | }; | ||
968 | |||
969 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { | ||
970 | { 0x0184, nv04_graph_mthd_bind_chroma }, | ||
971 | { 0x0188, nv04_graph_mthd_bind_clip }, | ||
972 | { 0x018c, nv04_graph_mthd_bind_nv04_patt }, | ||
973 | { 0x0190, nv04_graph_mthd_bind_rop }, | ||
974 | { 0x0194, nv04_graph_mthd_bind_beta1 }, | ||
975 | { 0x0198, nv04_graph_mthd_bind_beta4 }, | ||
976 | { 0x019c, nv04_graph_mthd_bind_surf2d }, | ||
977 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
978 | {}, | ||
979 | }; | ||
980 | |||
981 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { | ||
982 | { 0x0188, nv04_graph_mthd_bind_chroma }, | ||
983 | { 0x018c, nv04_graph_mthd_bind_clip }, | ||
984 | { 0x0190, nv04_graph_mthd_bind_nv04_patt }, | ||
985 | { 0x0194, nv04_graph_mthd_bind_rop }, | ||
986 | { 0x0198, nv04_graph_mthd_bind_beta1 }, | ||
987 | { 0x019c, nv04_graph_mthd_bind_beta4 }, | ||
988 | { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, | ||
989 | { 0x03e4, nv04_graph_mthd_set_operation }, | ||
990 | {}, | ||
991 | }; | ||
992 | |||
993 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { | ||
994 | { 0x0184, nv04_graph_mthd_bind_chroma }, | ||
995 | { 0x0188, nv04_graph_mthd_bind_clip }, | ||
996 | { 0x018c, nv04_graph_mthd_bind_nv01_patt }, | ||
997 | { 0x0190, nv04_graph_mthd_bind_rop }, | ||
998 | { 0x0194, nv04_graph_mthd_bind_beta1 }, | ||
999 | { 0x0198, nv04_graph_mthd_bind_surf_dst }, | ||
1000 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
1001 | {}, | ||
1002 | }; | ||
1003 | |||
1004 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { | ||
1005 | { 0x0184, nv04_graph_mthd_bind_chroma }, | ||
1006 | { 0x0188, nv04_graph_mthd_bind_nv01_patt }, | ||
1007 | { 0x018c, nv04_graph_mthd_bind_rop }, | ||
1008 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | ||
1009 | { 0x0194, nv04_graph_mthd_bind_surf_dst }, | ||
1010 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
1011 | {}, | ||
1012 | }; | ||
1013 | |||
1014 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { | ||
1015 | { 0x0184, nv04_graph_mthd_bind_chroma }, | ||
1016 | { 0x0188, nv04_graph_mthd_bind_nv04_patt }, | ||
1017 | { 0x018c, nv04_graph_mthd_bind_rop }, | ||
1018 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | ||
1019 | { 0x0194, nv04_graph_mthd_bind_beta4 }, | ||
1020 | { 0x0198, nv04_graph_mthd_bind_surf2d }, | ||
1021 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
1022 | {}, | ||
1023 | }; | ||
1024 | |||
1025 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { | ||
1026 | { 0x0188, nv04_graph_mthd_bind_nv01_patt }, | ||
1027 | { 0x018c, nv04_graph_mthd_bind_rop }, | ||
1028 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | ||
1029 | { 0x0194, nv04_graph_mthd_bind_surf_dst }, | ||
1030 | { 0x0304, nv04_graph_mthd_set_operation }, | ||
1031 | {}, | ||
1032 | }; | ||
1033 | |||
1034 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { | ||
1035 | { 0x0188, nv04_graph_mthd_bind_nv04_patt }, | ||
1036 | { 0x018c, nv04_graph_mthd_bind_rop }, | ||
1037 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | ||
1038 | { 0x0194, nv04_graph_mthd_bind_beta4 }, | ||
1039 | { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, | ||
1040 | { 0x0304, nv04_graph_mthd_set_operation }, | ||
1041 | {}, | ||
1042 | }; | ||
1043 | 976 | ||
1044 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { | 977 | if (dev_priv->engine.graph.registered) |
1045 | { 0x0184, nv04_graph_mthd_bind_clip }, | 978 | return 0; |
1046 | { 0x0188, nv04_graph_mthd_bind_nv01_patt }, | ||
1047 | { 0x018c, nv04_graph_mthd_bind_rop }, | ||
1048 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | ||
1049 | { 0x0194, nv04_graph_mthd_bind_surf_dst }, | ||
1050 | { 0x02fc, nv04_graph_mthd_set_operation }, | ||
1051 | {}, | ||
1052 | }; | ||
1053 | 979 | ||
1054 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { | 980 | /* dvd subpicture */ |
1055 | { 0x0184, nv04_graph_mthd_bind_clip }, | 981 | NVOBJ_CLASS(dev, 0x0038, GR); |
1056 | { 0x0188, nv04_graph_mthd_bind_nv04_patt }, | 982 | |
1057 | { 0x018c, nv04_graph_mthd_bind_rop }, | 983 | /* m2mf */ |
1058 | { 0x0190, nv04_graph_mthd_bind_beta1 }, | 984 | NVOBJ_CLASS(dev, 0x0039, GR); |
1059 | { 0x0194, nv04_graph_mthd_bind_beta4 }, | 985 | |
1060 | { 0x0198, nv04_graph_mthd_bind_surf2d }, | 986 | /* nv03 gdirect */ |
1061 | { 0x02fc, nv04_graph_mthd_set_operation }, | 987 | NVOBJ_CLASS(dev, 0x004b, GR); |
1062 | {}, | 988 | NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt); |
989 | NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop); | ||
990 | NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1); | ||
991 | NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst); | ||
992 | NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation); | ||
993 | |||
994 | /* nv04 gdirect */ | ||
995 | NVOBJ_CLASS(dev, 0x004a, GR); | ||
996 | NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt); | ||
997 | NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop); | ||
998 | NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1); | ||
999 | NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4); | ||
1000 | NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d); | ||
1001 | NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation); | ||
1002 | |||
1003 | /* nv01 imageblit */ | ||
1004 | NVOBJ_CLASS(dev, 0x001f, GR); | ||
1005 | NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma); | ||
1006 | NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip); | ||
1007 | NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt); | ||
1008 | NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop); | ||
1009 | NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1); | ||
1010 | NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst); | ||
1011 | NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src); | ||
1012 | NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation); | ||
1013 | |||
1014 | /* nv04 imageblit */ | ||
1015 | NVOBJ_CLASS(dev, 0x005f, GR); | ||
1016 | NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma); | ||
1017 | NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip); | ||
1018 | NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt); | ||
1019 | NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop); | ||
1020 | NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1); | ||
1021 | NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4); | ||
1022 | NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d); | ||
1023 | NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation); | ||
1024 | |||
1025 | /* nv04 iifc */ | ||
1026 | NVOBJ_CLASS(dev, 0x0060, GR); | ||
1027 | NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma); | ||
1028 | NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip); | ||
1029 | NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt); | ||
1030 | NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop); | ||
1031 | NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1); | ||
1032 | NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4); | ||
1033 | NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf); | ||
1034 | NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation); | ||
1035 | |||
1036 | /* nv05 iifc */ | ||
1037 | NVOBJ_CLASS(dev, 0x0064, GR); | ||
1038 | |||
1039 | /* nv01 ifc */ | ||
1040 | NVOBJ_CLASS(dev, 0x0021, GR); | ||
1041 | NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma); | ||
1042 | NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip); | ||
1043 | NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt); | ||
1044 | NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop); | ||
1045 | NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1); | ||
1046 | NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst); | ||
1047 | NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation); | ||
1048 | |||
1049 | /* nv04 ifc */ | ||
1050 | NVOBJ_CLASS(dev, 0x0061, GR); | ||
1051 | NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma); | ||
1052 | NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip); | ||
1053 | NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt); | ||
1054 | NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop); | ||
1055 | NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1); | ||
1056 | NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4); | ||
1057 | NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d); | ||
1058 | NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation); | ||
1059 | |||
1060 | /* nv05 ifc */ | ||
1061 | NVOBJ_CLASS(dev, 0x0065, GR); | ||
1062 | |||
1063 | /* nv03 sifc */ | ||
1064 | NVOBJ_CLASS(dev, 0x0036, GR); | ||
1065 | NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma); | ||
1066 | NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt); | ||
1067 | NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop); | ||
1068 | NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1069 | NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst); | ||
1070 | NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation); | ||
1071 | |||
1072 | /* nv04 sifc */ | ||
1073 | NVOBJ_CLASS(dev, 0x0076, GR); | ||
1074 | NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma); | ||
1075 | NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt); | ||
1076 | NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop); | ||
1077 | NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1078 | NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4); | ||
1079 | NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d); | ||
1080 | NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation); | ||
1081 | |||
1082 | /* nv05 sifc */ | ||
1083 | NVOBJ_CLASS(dev, 0x0066, GR); | ||
1084 | |||
1085 | /* nv03 sifm */ | ||
1086 | NVOBJ_CLASS(dev, 0x0037, GR); | ||
1087 | NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt); | ||
1088 | NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop); | ||
1089 | NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1090 | NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst); | ||
1091 | NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation); | ||
1092 | |||
1093 | /* nv04 sifm */ | ||
1094 | NVOBJ_CLASS(dev, 0x0077, GR); | ||
1095 | NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt); | ||
1096 | NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop); | ||
1097 | NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1098 | NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4); | ||
1099 | NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf); | ||
1100 | NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation); | ||
1101 | |||
1102 | /* null */ | ||
1103 | NVOBJ_CLASS(dev, 0x0030, GR); | ||
1104 | |||
1105 | /* surf2d */ | ||
1106 | NVOBJ_CLASS(dev, 0x0042, GR); | ||
1107 | |||
1108 | /* rop */ | ||
1109 | NVOBJ_CLASS(dev, 0x0043, GR); | ||
1110 | |||
1111 | /* beta1 */ | ||
1112 | NVOBJ_CLASS(dev, 0x0012, GR); | ||
1113 | |||
1114 | /* beta4 */ | ||
1115 | NVOBJ_CLASS(dev, 0x0072, GR); | ||
1116 | |||
1117 | /* cliprect */ | ||
1118 | NVOBJ_CLASS(dev, 0x0019, GR); | ||
1119 | |||
1120 | /* nv01 pattern */ | ||
1121 | NVOBJ_CLASS(dev, 0x0018, GR); | ||
1122 | |||
1123 | /* nv04 pattern */ | ||
1124 | NVOBJ_CLASS(dev, 0x0044, GR); | ||
1125 | |||
1126 | /* swzsurf */ | ||
1127 | NVOBJ_CLASS(dev, 0x0052, GR); | ||
1128 | |||
1129 | /* surf3d */ | ||
1130 | NVOBJ_CLASS(dev, 0x0053, GR); | ||
1131 | NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h); | ||
1132 | NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v); | ||
1133 | |||
1134 | /* nv03 tex_tri */ | ||
1135 | NVOBJ_CLASS(dev, 0x0048, GR); | ||
1136 | NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip); | ||
1137 | NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color); | ||
1138 | NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta); | ||
1139 | |||
1140 | /* tex_tri */ | ||
1141 | NVOBJ_CLASS(dev, 0x0054, GR); | ||
1142 | |||
1143 | /* multitex_tri */ | ||
1144 | NVOBJ_CLASS(dev, 0x0055, GR); | ||
1145 | |||
1146 | /* nv01 chroma */ | ||
1147 | NVOBJ_CLASS(dev, 0x0017, GR); | ||
1148 | |||
1149 | /* nv04 chroma */ | ||
1150 | NVOBJ_CLASS(dev, 0x0057, GR); | ||
1151 | |||
1152 | /* surf_dst */ | ||
1153 | NVOBJ_CLASS(dev, 0x0058, GR); | ||
1154 | |||
1155 | /* surf_src */ | ||
1156 | NVOBJ_CLASS(dev, 0x0059, GR); | ||
1157 | |||
1158 | /* surf_color */ | ||
1159 | NVOBJ_CLASS(dev, 0x005a, GR); | ||
1160 | |||
1161 | /* surf_zeta */ | ||
1162 | NVOBJ_CLASS(dev, 0x005b, GR); | ||
1163 | |||
1164 | /* nv01 line */ | ||
1165 | NVOBJ_CLASS(dev, 0x001c, GR); | ||
1166 | NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip); | ||
1167 | NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt); | ||
1168 | NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop); | ||
1169 | NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1170 | NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst); | ||
1171 | NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation); | ||
1172 | |||
1173 | /* nv04 line */ | ||
1174 | NVOBJ_CLASS(dev, 0x005c, GR); | ||
1175 | NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip); | ||
1176 | NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt); | ||
1177 | NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop); | ||
1178 | NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1179 | NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4); | ||
1180 | NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d); | ||
1181 | NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation); | ||
1182 | |||
1183 | /* nv01 tri */ | ||
1184 | NVOBJ_CLASS(dev, 0x001d, GR); | ||
1185 | NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip); | ||
1186 | NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt); | ||
1187 | NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop); | ||
1188 | NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1189 | NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst); | ||
1190 | NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation); | ||
1191 | |||
1192 | /* nv04 tri */ | ||
1193 | NVOBJ_CLASS(dev, 0x005d, GR); | ||
1194 | NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip); | ||
1195 | NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt); | ||
1196 | NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop); | ||
1197 | NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1198 | NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4); | ||
1199 | NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d); | ||
1200 | NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation); | ||
1201 | |||
1202 | /* nv01 rect */ | ||
1203 | NVOBJ_CLASS(dev, 0x001e, GR); | ||
1204 | NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip); | ||
1205 | NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt); | ||
1206 | NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop); | ||
1207 | NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1208 | NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst); | ||
1209 | NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation); | ||
1210 | |||
1211 | /* nv04 rect */ | ||
1212 | NVOBJ_CLASS(dev, 0x005e, GR); | ||
1213 | NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip); | ||
1214 | NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt); | ||
1215 | NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop); | ||
1216 | NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1); | ||
1217 | NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4); | ||
1218 | NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d); | ||
1219 | NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation); | ||
1220 | |||
1221 | /* nvsw */ | ||
1222 | NVOBJ_CLASS(dev, 0x506e, SW); | ||
1223 | NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); | ||
1224 | NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); | ||
1225 | |||
1226 | dev_priv->engine.graph.registered = true; | ||
1227 | return 0; | ||
1063 | }; | 1228 | }
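
The per-class method arrays below are replaced by NVOBJ_CLASS()/NVOBJ_MTHD() registration, so software-method handlers can be looked up at interrupt time by (class, method) instead of walking static tables. A hypothetical sketch of the kind of dispatch this enables (structure and names are assumptions, not the in-tree implementation):

    struct mthd_ent {
            u32 mthd;
            int (*func)(struct nouveau_channel *, u32, u32, u32);
    };

    static int call_mthd(struct nouveau_channel *chan, u32 class, u32 mthd,
                         u32 data, const struct mthd_ent *tbl, int nr)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    if (tbl[i].mthd == mthd)
                            return tbl[i].func(chan, class, mthd, data);
            }
            return -ENOENT;  /* no software handler registered */
    }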
1064 | 1229 | ||
1065 | static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { | 1230 | static struct nouveau_bitfield nv04_graph_intr[] = { |
1066 | { 0x0188, nv04_graph_mthd_bind_clip }, | 1231 | { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, |
1067 | { 0x018c, nv04_graph_mthd_bind_surf_color }, | 1232 | {} |
1068 | { 0x0190, nv04_graph_mthd_bind_surf_zeta }, | ||
1069 | {}, | ||
1070 | }; | 1233 | }; |
1071 | 1234 | ||
1072 | static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { | 1235 | static struct nouveau_bitfield nv04_graph_nstatus[] = |
1073 | { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, | 1236 | { |
1074 | { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, | 1237 | { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, |
1075 | {}, | 1238 | { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, |
1239 | { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, | ||
1240 | { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, | ||
1241 | {} | ||
1076 | }; | 1242 | }; |
1077 | 1243 | ||
1078 | struct nouveau_pgraph_object_class nv04_graph_grclass[] = { | 1244 | struct nouveau_bitfield nv04_graph_nsource[] = |
1079 | { 0x0038, false, NULL }, /* dvd subpicture */ | 1245 | { |
1080 | { 0x0039, false, NULL }, /* m2mf */ | 1246 | { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, |
1081 | { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ | 1247 | { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, |
1082 | { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ | 1248 | { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, |
1083 | { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ | 1249 | { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, |
1084 | { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ | 1250 | { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, |
1085 | { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ | 1251 | { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, |
1086 | { 0x0064, false, NULL }, /* nv05 iifc */ | 1252 | { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, |
1087 | { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ | 1253 | { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, |
1088 | { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ | 1254 | { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, |
1089 | { 0x0065, false, NULL }, /* nv05 ifc */ | 1255 | { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, |
1090 | { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ | 1256 | { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, |
1091 | { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ | 1257 | { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, |
1092 | { 0x0066, false, NULL }, /* nv05 sifc */ | 1258 | { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, |
1093 | { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ | 1259 | { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, |
1094 | { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ | 1260 | { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, |
1095 | { 0x0030, false, NULL }, /* null */ | 1261 | { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, |
1096 | { 0x0042, false, NULL }, /* surf2d */ | 1262 | { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, |
1097 | { 0x0043, false, NULL }, /* rop */ | 1263 | { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, |
1098 | { 0x0012, false, NULL }, /* beta1 */ | 1264 | { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, |
1099 | { 0x0072, false, NULL }, /* beta4 */ | ||
1100 | { 0x0019, false, NULL }, /* cliprect */ | ||
1101 | { 0x0018, false, NULL }, /* nv01 pattern */ | ||
1102 | { 0x0044, false, NULL }, /* nv04 pattern */ | ||
1103 | { 0x0052, false, NULL }, /* swzsurf */ | ||
1104 | { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ | ||
1105 | { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ | ||
1106 | { 0x0054, false, NULL }, /* tex_tri */ | ||
1107 | { 0x0055, false, NULL }, /* multitex_tri */ | ||
1108 | { 0x0017, false, NULL }, /* nv01 chroma */ | ||
1109 | { 0x0057, false, NULL }, /* nv04 chroma */ | ||
1110 | { 0x0058, false, NULL }, /* surf_dst */ | ||
1111 | { 0x0059, false, NULL }, /* surf_src */ | ||
1112 | { 0x005a, false, NULL }, /* surf_color */ | ||
1113 | { 0x005b, false, NULL }, /* surf_zeta */ | ||
1114 | { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ | ||
1115 | { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ | ||
1116 | { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ | ||
1117 | { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ | ||
1118 | { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ | ||
1119 | { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ | ||
1120 | { 0x506e, true, nv04_graph_mthds_sw }, | ||
1121 | {} | 1265 | {} |
1122 | }; | 1266 | }; |
1123 | 1267 | ||
1268 | static void | ||
1269 | nv04_graph_isr(struct drm_device *dev) | ||
1270 | { | ||
1271 | u32 stat; | ||
1272 | |||
1273 | while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { | ||
1274 | u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | ||
1275 | u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); | ||
1276 | u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); | ||
1277 | u32 chid = (addr & 0x0f000000) >> 24; | ||
1278 | u32 subc = (addr & 0x0000e000) >> 13; | ||
1279 | u32 mthd = (addr & 0x00001ffc); | ||
1280 | u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); | ||
1281 | u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff; | ||
1282 | u32 show = stat; | ||
1283 | |||
1284 | if (stat & NV_PGRAPH_INTR_NOTIFY) { | ||
1285 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | ||
1286 | if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) | ||
1287 | show &= ~NV_PGRAPH_INTR_NOTIFY; | ||
1288 | } | ||
1289 | } | ||
1290 | |||
1291 | if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { | ||
1292 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
1293 | stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | ||
1294 | show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | ||
1295 | nv04_graph_context_switch(dev); | ||
1296 | } | ||
1297 | |||
1298 | nv_wr32(dev, NV03_PGRAPH_INTR, stat); | ||
1299 | nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); | ||
1300 | |||
1301 | if (show && nouveau_ratelimit()) { | ||
1302 | NV_INFO(dev, "PGRAPH -"); | ||
1303 | nouveau_bitfield_print(nv04_graph_intr, show); | ||
1304 | printk(" nsource:"); | ||
1305 | nouveau_bitfield_print(nv04_graph_nsource, nsource); | ||
1306 | printk(" nstatus:"); | ||
1307 | nouveau_bitfield_print(nv04_graph_nstatus, nstatus); | ||
1308 | printk("\n"); | ||
1309 | NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " | ||
1310 | "mthd 0x%04x data 0x%08x\n", | ||
1311 | chid, subc, class, mthd, data); | ||
1312 | } | ||
1313 | } | ||
1314 | } | ||
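
The PGRAPH handler decodes NV04_PGRAPH_TRAPPED_ADDR into channel (bits 27:24), subchannel (bits 15:13) and method (bits 12:2). A worked decode with an illustrative value:

    const u32 addr = 0x0300415c;                 /* example trapped address */
    const u32 chid = (addr & 0x0f000000) >> 24;  /* == 3 */
    const u32 subc = (addr & 0x0000e000) >> 13;  /* == 2 */
    const u32 mthd =  addr & 0x00001ffc;         /* == 0x015c */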
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 0b5ae297abde..b8e3edb5c063 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c | |||
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | int | 100 | int |
101 | nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, | 101 | nv04_instmem_suspend(struct drm_device *dev) |
102 | uint32_t *sz) | ||
103 | { | 102 | { |
104 | return 0; | 103 | return 0; |
105 | } | 104 | } |
106 | 105 | ||
107 | void | 106 | void |
108 | nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 107 | nv04_instmem_resume(struct drm_device *dev) |
109 | { | ||
110 | } | ||
111 | |||
112 | int | ||
113 | nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | ||
114 | { | 108 | { |
115 | return 0; | ||
116 | } | 109 | } |
117 | 110 | ||
118 | int | 111 | int |
119 | nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 112 | nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) |
120 | { | 113 | { |
114 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; | ||
115 | struct drm_mm_node *ramin = NULL; | ||
116 | |||
117 | do { | ||
118 | if (drm_mm_pre_get(&dev_priv->ramin_heap)) | ||
119 | return -ENOMEM; | ||
120 | |||
121 | spin_lock(&dev_priv->ramin_lock); | ||
122 | ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0); | ||
123 | if (ramin == NULL) { | ||
124 | spin_unlock(&dev_priv->ramin_lock); | ||
125 | return -ENOMEM; | ||
126 | } | ||
127 | |||
128 | ramin = drm_mm_get_block_atomic(ramin, size, align); | ||
129 | spin_unlock(&dev_priv->ramin_lock); | ||
130 | } while (ramin == NULL); | ||
131 | |||
132 | gpuobj->node = ramin; | ||
133 | gpuobj->vinst = ramin->start; | ||
121 | return 0; | 134 | return 0; |
122 | } | 135 | } |
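
nv04_instmem_get() uses the lock-friendly drm_mm idiom: drm_mm_pre_get() refills the node cache outside the spinlock, the search and the atomic claim happen under ramin_lock, and a lost race (drm_mm_get_block_atomic() returning NULL) simply retries. The same pattern restated generically (a sketch under those assumptions, not a drop-in helper):

    static struct drm_mm_node *
    claim_range(struct drm_mm *mm, spinlock_t *lock, u32 size, u32 align)
    {
            struct drm_mm_node *node;

            do {
                    if (drm_mm_pre_get(mm))   /* preallocate outside the lock */
                            return NULL;
                    spin_lock(lock);
                    node = drm_mm_search_free(mm, size, align, 0);
                    if (!node) {              /* genuinely out of space */
                            spin_unlock(lock);
                            return NULL;
                    }
                    node = drm_mm_get_block_atomic(node, size, align);
                    spin_unlock(lock);
            } while (!node);                  /* raced with another claimant */

            return node;
    }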
123 | 136 | ||
124 | void | 137 | void |
125 | nv04_instmem_flush(struct drm_device *dev) | 138 | nv04_instmem_put(struct nouveau_gpuobj *gpuobj) |
126 | { | 139 | { |
140 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; | ||
141 | |||
142 | spin_lock(&dev_priv->ramin_lock); | ||
143 | drm_mm_put_block(gpuobj->node); | ||
144 | gpuobj->node = NULL; | ||
145 | spin_unlock(&dev_priv->ramin_lock); | ||
127 | } | 146 | } |
128 | 147 | ||
129 | int | 148 | int |
130 | nv04_instmem_suspend(struct drm_device *dev) | 149 | nv04_instmem_map(struct nouveau_gpuobj *gpuobj) |
131 | { | 150 | { |
151 | gpuobj->pinst = gpuobj->vinst; | ||
132 | return 0; | 152 | return 0; |
133 | } | 153 | } |
134 | 154 | ||
135 | void | 155 | void |
136 | nv04_instmem_resume(struct drm_device *dev) | 156 | nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj) |
137 | { | 157 | { |
138 | } | 158 | } |
139 | 159 | ||
160 | void | ||
161 | nv04_instmem_flush(struct drm_device *dev) | ||
162 | { | ||
163 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index cc5cda44e501..f78181a59b4a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c | |||
@@ -3,23 +3,109 @@ | |||
3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | static struct drm_mm_node * | ||
7 | nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size) | ||
8 | { | ||
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
10 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
11 | struct drm_mm_node *mem; | ||
12 | int ret; | ||
13 | |||
14 | ret = drm_mm_pre_get(&pfb->tag_heap); | ||
15 | if (ret) | ||
16 | return NULL; | ||
17 | |||
18 | spin_lock(&dev_priv->tile.lock); | ||
19 | mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0); | ||
20 | if (mem) | ||
21 | mem = drm_mm_get_block_atomic(mem, size, 0); | ||
22 | spin_unlock(&dev_priv->tile.lock); | ||
23 | |||
24 | return mem; | ||
25 | } | ||
26 | |||
27 | static void | ||
28 | nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem) | ||
29 | { | ||
30 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
31 | |||
32 | spin_lock(&dev_priv->tile.lock); | ||
33 | drm_mm_put_block(mem); | ||
34 | spin_unlock(&dev_priv->tile.lock); | ||
35 | } | ||
36 | |||
37 | void | ||
38 | nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, | ||
39 | uint32_t size, uint32_t pitch, uint32_t flags) | ||
40 | { | ||
41 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
42 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
43 | int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16); | ||
44 | |||
45 | tile->addr = addr; | ||
46 | tile->limit = max(1u, addr + size) - 1; | ||
47 | tile->pitch = pitch; | ||
48 | |||
49 | if (dev_priv->card_type == NV_20) { | ||
50 | if (flags & NOUVEAU_GEM_TILE_ZETA) { | ||
51 | /* | ||
52 | * Allocate some of the on-die tag memory, | ||
53 | * used to store Z compression meta-data (most | ||
54 | * likely just a bitmap determining if a given | ||
55 | * tile is compressed or not). | ||
56 | */ | ||
57 | tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256); | ||
58 | |||
59 | if (tile->tag_mem) { | ||
60 | /* Enable Z compression */ | ||
61 | if (dev_priv->chipset >= 0x25) | ||
62 | tile->zcomp = tile->tag_mem->start | | ||
63 | (bpp == 16 ? | ||
64 | NV25_PFB_ZCOMP_MODE_16 : | ||
65 | NV25_PFB_ZCOMP_MODE_32); | ||
66 | else | ||
67 | tile->zcomp = tile->tag_mem->start | | ||
68 | NV20_PFB_ZCOMP_EN | | ||
69 | (bpp == 16 ? 0 : | ||
70 | NV20_PFB_ZCOMP_MODE_32); | ||
71 | } | ||
72 | |||
73 | tile->addr |= 3; | ||
74 | } else { | ||
75 | tile->addr |= 1; | ||
76 | } | ||
77 | |||
78 | } else { | ||
79 | tile->addr |= 1 << 31; | ||
80 | } | ||
81 | } | ||
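
The tag allocation above reserves size / 256 bytes of on-die tag memory, i.e. one tag byte per 256 bytes of the tiled region. An illustrative sizing against the heaps created in nv10_fb_init():

    u32 region = 4 * 1024 * 1024;   /* hypothetical 4 MiB zeta region */
    u32 tags   = region / 256;      /* == 16384 bytes of tag memory, fits the
                                       32 KiB (nv20) or 64 KiB (nv25+) tag heap */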
82 | |||
6 | void | 83 | void |
7 | nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | 84 | nv10_fb_free_tile_region(struct drm_device *dev, int i) |
8 | uint32_t size, uint32_t pitch) | ||
9 | { | 85 | { |
10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 86 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
11 | uint32_t limit = max(1u, addr + size) - 1; | 87 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
12 | 88 | ||
13 | if (pitch) { | 89 | if (tile->tag_mem) { |
14 | if (dev_priv->card_type >= NV_20) | 90 | nv20_fb_free_tag(dev, tile->tag_mem); |
15 | addr |= 1; | 91 | tile->tag_mem = NULL; |
16 | else | ||
17 | addr |= 1 << 31; | ||
18 | } | 92 | } |
19 | 93 | ||
20 | nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); | 94 | tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; |
21 | nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); | 95 | } |
22 | nv_wr32(dev, NV10_PFB_TILE(i), addr); | 96 | |
97 | void | ||
98 | nv10_fb_set_tile_region(struct drm_device *dev, int i) | ||
99 | { | ||
100 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
101 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
102 | |||
103 | nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); | ||
104 | nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); | ||
105 | nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); | ||
106 | |||
107 | if (dev_priv->card_type == NV_20) | ||
108 | nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp); | ||
23 | } | 109 | } |
24 | 110 | ||
25 | int | 111 | int |
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev) | |||
31 | 117 | ||
32 | pfb->num_tiles = NV10_PFB_TILE__SIZE; | 118 | pfb->num_tiles = NV10_PFB_TILE__SIZE; |
33 | 119 | ||
120 | if (dev_priv->card_type == NV_20) | ||
121 | drm_mm_init(&pfb->tag_heap, 0, | ||
122 | (dev_priv->chipset >= 0x25 ? | ||
123 | 64 * 1024 : 32 * 1024)); | ||
124 | |||
34 | /* Turn all the tiling regions off. */ | 125 | /* Turn all the tiling regions off. */ |
35 | for (i = 0; i < pfb->num_tiles; i++) | 126 | for (i = 0; i < pfb->num_tiles; i++) |
36 | pfb->set_region_tiling(dev, i, 0, 0, 0); | 127 | pfb->set_tile_region(dev, i); |
37 | 128 | ||
38 | return 0; | 129 | return 0; |
39 | } | 130 | } |
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev) | |||
41 | void | 132 | void |
42 | nv10_fb_takedown(struct drm_device *dev) | 133 | nv10_fb_takedown(struct drm_device *dev) |
43 | { | 134 | { |
135 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
136 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
137 | int i; | ||
138 | |||
139 | for (i = 0; i < pfb->num_tiles; i++) | ||
140 | pfb->free_tile_region(dev, i); | ||
141 | |||
142 | if (dev_priv->card_type == NV_20) | ||
143 | drm_mm_takedown(&pfb->tag_heap); | ||
44 | } | 144 | } |
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index f1b03ad58fd5..d2ecbff4bee1 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c | |||
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan) | |||
53 | if (ret) | 53 | if (ret) |
54 | return ret; | 54 | return ret; |
55 | 55 | ||
56 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
57 | NV03_USER(chan->id), PAGE_SIZE); | ||
58 | if (!chan->user) | ||
59 | return -ENOMEM; | ||
60 | |||
56 | /* Fill entries that are seen filled in dumps of the nvidia driver just | 61 | /* Fill entries that are seen filled in dumps of the nvidia driver just |
57 | * after the channel is put into DMA mode | 62 | * after the channel is put into DMA mode |
58 | */ | 63 | */ |
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) | |||
73 | return 0; | 78 | return 0; |
74 | } | 79 | } |
75 | 80 | ||
76 | void | ||
77 | nv10_fifo_destroy_context(struct nouveau_channel *chan) | ||
78 | { | ||
79 | struct drm_device *dev = chan->dev; | ||
80 | |||
81 | nv_wr32(dev, NV04_PFIFO_MODE, | ||
82 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); | ||
83 | |||
84 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | ||
85 | } | ||
86 | |||
87 | static void | 81 | static void |
88 | nv10_fifo_do_load_context(struct drm_device *dev, int chid) | 82 | nv10_fifo_do_load_context(struct drm_device *dev, int chid) |
89 | { | 83 | { |
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev) | |||
219 | static void | 213 | static void |
220 | nv10_fifo_init_intr(struct drm_device *dev) | 214 | nv10_fifo_init_intr(struct drm_device *dev) |
221 | { | 215 | { |
216 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
222 | nv_wr32(dev, 0x002100, 0xffffffff); | 217 | nv_wr32(dev, 0x002100, 0xffffffff); |
223 | nv_wr32(dev, 0x002140, 0xffffffff); | 218 | nv_wr32(dev, 0x002140, 0xffffffff); |
224 | } | 219 | } |
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev) | |||
241 | pfifo->reassign(dev, true); | 236 | pfifo->reassign(dev, true); |
242 | 237 | ||
243 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 238 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
244 | if (dev_priv->fifos[i]) { | 239 | if (dev_priv->channels.ptr[i]) { |
245 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); | 240 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); |
246 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); | 241 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); |
247 | } | 242 | } |
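With this change each NV10 channel maps one page of the BAR0 USER window at creation time, and the per-chip nv10_fifo_destroy_context() disappears in favor of common teardown code that is not visible in this hunk. A minimal kernel-style sketch of the balanced mapping pattern follows; the window base and stride constants are illustrative placeholders (the real offset comes from the NV03_USER() macro), and the unmap half is an assumption about where the shared destroy path releases the mapping.

#include <linux/io.h>
#include <linux/pci.h>

#define USER_BASE   0x00800000			/* illustrative, not real */
#define USER_STRIDE 0x00010000			/* illustrative, not real */
#define USER_OFFSET(id) (USER_BASE + (id) * USER_STRIDE)

/* Map one page of the register aperture for this channel, as the
 * create_context hunk above does via NV03_USER(chan->id). */
static void __iomem *map_channel_user(struct pci_dev *pdev, int id)
{
	return ioremap(pci_resource_start(pdev, 0) + USER_OFFSET(id),
		       PAGE_SIZE);
}

/* Presumed counterpart in the common channel teardown: every
 * successful ioremap() must be balanced by exactly one iounmap(). */
static void unmap_channel_user(void __iomem *user)
{
	if (user)
		iounmap(user);
}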
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 8e68c9731159..8c92edb7bbcd 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
@@ -26,6 +26,10 @@ | |||
26 | #include "drm.h" | 26 | #include "drm.h" |
27 | #include "nouveau_drm.h" | 27 | #include "nouveau_drm.h" |
28 | #include "nouveau_drv.h" | 28 | #include "nouveau_drv.h" |
29 | #include "nouveau_util.h" | ||
30 | |||
31 | static int nv10_graph_register(struct drm_device *); | ||
32 | static void nv10_graph_isr(struct drm_device *); | ||
29 | 33 | ||
30 | #define NV10_FIFO_NUMBER 32 | 34 | #define NV10_FIFO_NUMBER 32 |
31 | 35 | ||
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev) | |||
786 | return 0; | 790 | return 0; |
787 | } | 791 | } |
788 | 792 | ||
789 | void | 793 | static void |
790 | nv10_graph_context_switch(struct drm_device *dev) | 794 | nv10_graph_context_switch(struct drm_device *dev) |
791 | { | 795 | { |
792 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 796 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
793 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
794 | struct nouveau_channel *chan = NULL; | 797 | struct nouveau_channel *chan = NULL; |
795 | int chid; | 798 | int chid; |
796 | 799 | ||
797 | pgraph->fifo_access(dev, false); | ||
798 | nouveau_wait_for_idle(dev); | 800 | nouveau_wait_for_idle(dev); |
799 | 801 | ||
800 | /* If previous context is valid, we need to save it */ | 802 | /* If previous context is valid, we need to save it */ |
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev) | |||
802 | 804 | ||
803 | /* Load context for next channel */ | 805 | /* Load context for next channel */ |
804 | chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; | 806 | chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; |
805 | chan = dev_priv->fifos[chid]; | 807 | chan = dev_priv->channels.ptr[chid]; |
806 | if (chan && chan->pgraph_ctx) | 808 | if (chan && chan->pgraph_ctx) |
807 | nv10_graph_load_context(chan); | 809 | nv10_graph_load_context(chan); |
808 | |||
809 | pgraph->fifo_access(dev, true); | ||
810 | } | 810 | } |
811 | 811 | ||
812 | #define NV_WRITE_CTX(reg, val) do { \ | 812 | #define NV_WRITE_CTX(reg, val) do { \ |
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev) | |||
833 | if (chid >= dev_priv->engine.fifo.channels) | 833 | if (chid >= dev_priv->engine.fifo.channels) |
834 | return NULL; | 834 | return NULL; |
835 | 835 | ||
836 | return dev_priv->fifos[chid]; | 836 | return dev_priv->channels.ptr[chid]; |
837 | } | 837 | } |
838 | 838 | ||
839 | int nv10_graph_create_context(struct nouveau_channel *chan) | 839 | int nv10_graph_create_context(struct nouveau_channel *chan) |
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan) | |||
875 | 875 | ||
876 | void nv10_graph_destroy_context(struct nouveau_channel *chan) | 876 | void nv10_graph_destroy_context(struct nouveau_channel *chan) |
877 | { | 877 | { |
878 | struct drm_device *dev = chan->dev; | ||
879 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
880 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
878 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; | 881 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; |
882 | unsigned long flags; | ||
883 | |||
884 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
885 | pgraph->fifo_access(dev, false); | ||
886 | |||
887 | /* Unload the context if it's the currently active one */ | ||
888 | if (pgraph->channel(dev) == chan) | ||
889 | pgraph->unload_context(dev); | ||
879 | 890 | ||
891 | /* Free the context resources */ | ||
880 | kfree(pgraph_ctx); | 892 | kfree(pgraph_ctx); |
881 | chan->pgraph_ctx = NULL; | 893 | chan->pgraph_ctx = NULL; |
894 | |||
895 | pgraph->fifo_access(dev, true); | ||
896 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
882 | } | 897 | } |
883 | 898 | ||
884 | void | 899 | void |
885 | nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | 900 | nv10_graph_set_tile_region(struct drm_device *dev, int i) |
886 | uint32_t size, uint32_t pitch) | ||
887 | { | 901 | { |
888 | uint32_t limit = max(1u, addr + size) - 1; | 902 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
889 | 903 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | |
890 | if (pitch) | ||
891 | addr |= 1 << 31; | ||
892 | 904 | ||
893 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); | 905 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit); |
894 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); | 906 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch); |
895 | nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); | 907 | nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr); |
896 | } | 908 | } |
897 | 909 | ||
898 | int nv10_graph_init(struct drm_device *dev) | 910 | int nv10_graph_init(struct drm_device *dev) |
899 | { | 911 | { |
900 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 912 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
901 | uint32_t tmp; | 913 | uint32_t tmp; |
902 | int i; | 914 | int ret, i; |
903 | 915 | ||
904 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & | 916 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & |
905 | ~NV_PMC_ENABLE_PGRAPH); | 917 | ~NV_PMC_ENABLE_PGRAPH); |
906 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | | 918 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | |
907 | NV_PMC_ENABLE_PGRAPH); | 919 | NV_PMC_ENABLE_PGRAPH); |
908 | 920 | ||
921 | ret = nv10_graph_register(dev); | ||
922 | if (ret) | ||
923 | return ret; | ||
924 | |||
925 | nouveau_irq_register(dev, 12, nv10_graph_isr); | ||
909 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); | 926 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); |
910 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); | 927 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); |
911 | 928 | ||
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev) | |||
928 | 945 | ||
929 | /* Turn all the tiling regions off. */ | 946 | /* Turn all the tiling regions off. */ |
930 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) | 947 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
931 | nv10_graph_set_region_tiling(dev, i, 0, 0, 0); | 948 | nv10_graph_set_tile_region(dev, i); |
932 | 949 | ||
933 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); | 950 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); |
934 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); | 951 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); |
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev) | |||
948 | 965 | ||
949 | void nv10_graph_takedown(struct drm_device *dev) | 966 | void nv10_graph_takedown(struct drm_device *dev) |
950 | { | 967 | { |
968 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); | ||
969 | nouveau_irq_unregister(dev, 12); | ||
951 | } | 970 | } |
952 | 971 | ||
953 | static int | 972 | static int |
954 | nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, | 973 | nv17_graph_mthd_lma_window(struct nouveau_channel *chan, |
955 | int mthd, uint32_t data) | 974 | u32 class, u32 mthd, u32 data) |
956 | { | 975 | { |
957 | struct drm_device *dev = chan->dev; | 976 | struct drm_device *dev = chan->dev; |
958 | struct graph_state *ctx = chan->pgraph_ctx; | 977 | struct graph_state *ctx = chan->pgraph_ctx; |
959 | struct pipe_state *pipe = &ctx->pipe_state; | 978 | struct pipe_state *pipe = &ctx->pipe_state; |
960 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
961 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
962 | uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; | 979 | uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; |
963 | uint32_t xfmode0, xfmode1; | 980 | uint32_t xfmode0, xfmode1; |
964 | int i; | 981 | int i; |
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, | |||
1025 | 1042 | ||
1026 | nouveau_wait_for_idle(dev); | 1043 | nouveau_wait_for_idle(dev); |
1027 | 1044 | ||
1028 | pgraph->fifo_access(dev, true); | ||
1029 | |||
1030 | return 0; | 1045 | return 0; |
1031 | } | 1046 | } |
1032 | 1047 | ||
1033 | static int | 1048 | static int |
1034 | nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, | 1049 | nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, |
1035 | int mthd, uint32_t data) | 1050 | u32 class, u32 mthd, u32 data) |
1036 | { | 1051 | { |
1037 | struct drm_device *dev = chan->dev; | 1052 | struct drm_device *dev = chan->dev; |
1038 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1039 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
1040 | 1053 | ||
1041 | nouveau_wait_for_idle(dev); | 1054 | nouveau_wait_for_idle(dev); |
1042 | 1055 | ||
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, | |||
1045 | nv_wr32(dev, 0x004006b0, | 1058 | nv_wr32(dev, 0x004006b0, |
1046 | nv_rd32(dev, 0x004006b0) | 0x8 << 24); | 1059 | nv_rd32(dev, 0x004006b0) | 0x8 << 24); |
1047 | 1060 | ||
1048 | pgraph->fifo_access(dev, true); | 1061 | return 0; |
1062 | } | ||
1063 | |||
1064 | static int | ||
1065 | nv10_graph_register(struct drm_device *dev) | ||
1066 | { | ||
1067 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1068 | |||
1069 | if (dev_priv->engine.graph.registered) | ||
1070 | return 0; | ||
1071 | |||
1072 | NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ | ||
1073 | NVOBJ_CLASS(dev, 0x0030, GR); /* null */ | ||
1074 | NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ | ||
1075 | NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ | ||
1076 | NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ | ||
1077 | NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ | ||
1078 | NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ | ||
1079 | NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ | ||
1080 | NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ | ||
1081 | NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ | ||
1082 | NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ | ||
1083 | NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ | ||
1084 | NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ | ||
1085 | NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ | ||
1086 | NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */ | ||
1087 | NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */ | ||
1088 | NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */ | ||
1089 | NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */ | ||
1090 | |||
1091 | /* celsius */ | ||
1092 | if (dev_priv->chipset <= 0x10) { | ||
1093 | NVOBJ_CLASS(dev, 0x0056, GR); | ||
1094 | } else | ||
1095 | if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) { | ||
1096 | NVOBJ_CLASS(dev, 0x0096, GR); | ||
1097 | } else { | ||
1098 | NVOBJ_CLASS(dev, 0x0099, GR); | ||
1099 | NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window); | ||
1100 | NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window); | ||
1101 | NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window); | ||
1102 | NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window); | ||
1103 | NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); | ||
1104 | } | ||
1049 | 1105 | ||
1106 | /* nvsw */ | ||
1107 | NVOBJ_CLASS(dev, 0x506e, SW); | ||
1108 | NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); | ||
1109 | |||
1110 | dev_priv->engine.graph.registered = true; | ||
1050 | return 0; | 1111 | return 0; |
1051 | } | 1112 | } |
1052 | 1113 | ||
1053 | static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { | 1114 | struct nouveau_bitfield nv10_graph_intr[] = { |
1054 | { 0x1638, nv17_graph_mthd_lma_window }, | 1115 | { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, |
1055 | { 0x163c, nv17_graph_mthd_lma_window }, | 1116 | { NV_PGRAPH_INTR_ERROR, "ERROR" }, |
1056 | { 0x1640, nv17_graph_mthd_lma_window }, | ||
1057 | { 0x1644, nv17_graph_mthd_lma_window }, | ||
1058 | { 0x1658, nv17_graph_mthd_lma_enable }, | ||
1059 | {} | 1117 | {} |
1060 | }; | 1118 | }; |
1061 | 1119 | ||
1062 | struct nouveau_pgraph_object_class nv10_graph_grclass[] = { | 1120 | struct nouveau_bitfield nv10_graph_nstatus[] = |
1063 | { 0x0030, false, NULL }, /* null */ | 1121 | { |
1064 | { 0x0039, false, NULL }, /* m2mf */ | 1122 | { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, |
1065 | { 0x004a, false, NULL }, /* gdirect */ | 1123 | { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, |
1066 | { 0x005f, false, NULL }, /* imageblit */ | 1124 | { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, |
1067 | { 0x009f, false, NULL }, /* imageblit (nv12) */ | 1125 | { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, |
1068 | { 0x008a, false, NULL }, /* ifc */ | ||
1069 | { 0x0089, false, NULL }, /* sifm */ | ||
1070 | { 0x0062, false, NULL }, /* surf2d */ | ||
1071 | { 0x0043, false, NULL }, /* rop */ | ||
1072 | { 0x0012, false, NULL }, /* beta1 */ | ||
1073 | { 0x0072, false, NULL }, /* beta4 */ | ||
1074 | { 0x0019, false, NULL }, /* cliprect */ | ||
1075 | { 0x0044, false, NULL }, /* pattern */ | ||
1076 | { 0x0052, false, NULL }, /* swzsurf */ | ||
1077 | { 0x0093, false, NULL }, /* surf3d */ | ||
1078 | { 0x0094, false, NULL }, /* tex_tri */ | ||
1079 | { 0x0095, false, NULL }, /* multitex_tri */ | ||
1080 | { 0x0056, false, NULL }, /* celsius (nv10) */ | ||
1081 | { 0x0096, false, NULL }, /* celsius (nv11) */ | ||
1082 | { 0x0099, false, nv17_graph_celsius_mthds }, /* celsius (nv17) */ | ||
1083 | {} | 1126 | {} |
1084 | }; | 1127 | }; |
1128 | |||
1129 | static void | ||
1130 | nv10_graph_isr(struct drm_device *dev) | ||
1131 | { | ||
1132 | u32 stat; | ||
1133 | |||
1134 | while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { | ||
1135 | u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | ||
1136 | u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); | ||
1137 | u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); | ||
1138 | u32 chid = (addr & 0x01f00000) >> 20; | ||
1139 | u32 subc = (addr & 0x00070000) >> 16; | ||
1140 | u32 mthd = (addr & 0x00001ffc); | ||
1141 | u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); | ||
1142 | u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; | ||
1143 | u32 show = stat; | ||
1144 | |||
1145 | if (stat & NV_PGRAPH_INTR_ERROR) { | ||
1146 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | ||
1147 | if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) | ||
1148 | show &= ~NV_PGRAPH_INTR_ERROR; | ||
1149 | } | ||
1150 | } | ||
1151 | |||
1152 | if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { | ||
1153 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
1154 | stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | ||
1155 | show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | ||
1156 | nv10_graph_context_switch(dev); | ||
1157 | } | ||
1158 | |||
1159 | nv_wr32(dev, NV03_PGRAPH_INTR, stat); | ||
1160 | nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); | ||
1161 | |||
1162 | if (show && nouveau_ratelimit()) { | ||
1163 | NV_INFO(dev, "PGRAPH -"); | ||
1164 | nouveau_bitfield_print(nv10_graph_intr, show); | ||
1165 | printk(" nsource:"); | ||
1166 | nouveau_bitfield_print(nv04_graph_nsource, nsource); | ||
1167 | printk(" nstatus:"); | ||
1168 | nouveau_bitfield_print(nv10_graph_nstatus, nstatus); | ||
1169 | printk("\n"); | ||
1170 | NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " | ||
1171 | "mthd 0x%04x data 0x%08x\n", | ||
1172 | chid, subc, class, mthd, data); | ||
1173 | } | ||
1174 | } | ||
1175 | } | ||
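The new nv10_graph_isr() recovers the faulting channel, subchannel, and method from a single NV04_PGRAPH_TRAPPED_ADDR read using fixed bit fields. A small runnable example of that decode, using a made-up trapped-address value but the exact masks and shifts from the handler above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x00520184;	/* hypothetical trapped address */

	uint32_t chid = (addr & 0x01f00000) >> 20;	/* 5-bit channel id */
	uint32_t subc = (addr & 0x00070000) >> 16;	/* 3-bit subchannel */
	uint32_t mthd = (addr & 0x00001ffc);		/* word-aligned method */

	printf("ch %u subc %u mthd 0x%04x\n", chid, subc, mthd);
	/* prints: ch 5 subc 2 mthd 0x0184 */
	return 0;
}

Note that the class lookup differs per generation: this nv10 handler masks the subchannel's class register with 0xfff, while the nv40 handler later in this series uses 0xffff.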
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 12ab9cd56eca..8464b76798d5 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c | |||
@@ -32,6 +32,10 @@ | |||
32 | #define NV34_GRCTX_SIZE (18140) | 32 | #define NV34_GRCTX_SIZE (18140) |
33 | #define NV35_36_GRCTX_SIZE (22396) | 33 | #define NV35_36_GRCTX_SIZE (22396) |
34 | 34 | ||
35 | static int nv20_graph_register(struct drm_device *); | ||
36 | static int nv30_graph_register(struct drm_device *); | ||
37 | static void nv20_graph_isr(struct drm_device *); | ||
38 | |||
35 | static void | 39 | static void |
36 | nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) | 40 | nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) |
37 | { | 41 | { |
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) | |||
425 | struct drm_device *dev = chan->dev; | 429 | struct drm_device *dev = chan->dev; |
426 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 430 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
427 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 431 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
432 | unsigned long flags; | ||
428 | 433 | ||
429 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); | 434 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
435 | pgraph->fifo_access(dev, false); | ||
436 | |||
437 | /* Unload the context if it's the currently active one */ | ||
438 | if (pgraph->channel(dev) == chan) | ||
439 | pgraph->unload_context(dev); | ||
440 | |||
441 | pgraph->fifo_access(dev, true); | ||
442 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
443 | |||
444 | /* Free the context resources */ | ||
430 | nv_wo32(pgraph->ctx_table, chan->id * 4, 0); | 445 | nv_wo32(pgraph->ctx_table, chan->id * 4, 0); |
446 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); | ||
431 | } | 447 | } |
432 | 448 | ||
433 | int | 449 | int |
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev) | |||
496 | } | 512 | } |
497 | 513 | ||
498 | void | 514 | void |
499 | nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | 515 | nv20_graph_set_tile_region(struct drm_device *dev, int i) |
500 | uint32_t size, uint32_t pitch) | ||
501 | { | 516 | { |
502 | uint32_t limit = max(1u, addr + size) - 1; | 517 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
503 | 518 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | |
504 | if (pitch) | ||
505 | addr |= 1; | ||
506 | 519 | ||
507 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); | 520 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
508 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); | 521 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
509 | nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); | 522 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
510 | 523 | ||
511 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); | 524 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); |
512 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit); | 525 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit); |
513 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); | 526 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); |
514 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch); | 527 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch); |
515 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); | 528 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); |
516 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr); | 529 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr); |
530 | |||
531 | if (dev_priv->card_type == NV_20) { | ||
532 | nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp); | ||
533 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); | ||
534 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp); | ||
535 | } | ||
517 | } | 536 | } |
518 | 537 | ||
519 | int | 538 | int |
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev) | |||
560 | 579 | ||
561 | nv20_graph_rdi(dev); | 580 | nv20_graph_rdi(dev); |
562 | 581 | ||
582 | ret = nv20_graph_register(dev); | ||
583 | if (ret) { | ||
584 | nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | nouveau_irq_register(dev, 12, nv20_graph_isr); | ||
563 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); | 589 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); |
564 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); | 590 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); |
565 | 591 | ||
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev) | |||
571 | nv_wr32(dev, 0x40009C , 0x00000040); | 597 | nv_wr32(dev, 0x40009C , 0x00000040); |
572 | 598 | ||
573 | if (dev_priv->chipset >= 0x25) { | 599 | if (dev_priv->chipset >= 0x25) { |
574 | nv_wr32(dev, 0x400890, 0x00080000); | 600 | nv_wr32(dev, 0x400890, 0x00a8cfff); |
575 | nv_wr32(dev, 0x400610, 0x304B1FB6); | 601 | nv_wr32(dev, 0x400610, 0x304B1FB6); |
576 | nv_wr32(dev, 0x400B80, 0x18B82880); | 602 | nv_wr32(dev, 0x400B80, 0x1cbd3883); |
577 | nv_wr32(dev, 0x400B84, 0x44000000); | 603 | nv_wr32(dev, 0x400B84, 0x44000000); |
578 | nv_wr32(dev, 0x400098, 0x40000080); | 604 | nv_wr32(dev, 0x400098, 0x40000080); |
579 | nv_wr32(dev, 0x400B88, 0x000000ff); | 605 | nv_wr32(dev, 0x400B88, 0x000000ff); |
606 | |||
580 | } else { | 607 | } else { |
581 | nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ | 608 | nv_wr32(dev, 0x400880, 0x0008c7df); |
582 | nv_wr32(dev, 0x400094, 0x00000005); | 609 | nv_wr32(dev, 0x400094, 0x00000005); |
583 | nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ | 610 | nv_wr32(dev, 0x400B80, 0x45eae20e); |
584 | nv_wr32(dev, 0x400B84, 0x24000000); | 611 | nv_wr32(dev, 0x400B84, 0x24000000); |
585 | nv_wr32(dev, 0x400098, 0x00000040); | 612 | nv_wr32(dev, 0x400098, 0x00000040); |
586 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); | 613 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); |
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev) | |||
591 | 618 | ||
592 | /* Turn all the tiling regions off. */ | 619 | /* Turn all the tiling regions off. */ |
593 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) | 620 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
594 | nv20_graph_set_region_tiling(dev, i, 0, 0, 0); | 621 | nv20_graph_set_tile_region(dev, i); |
595 | 622 | ||
596 | for (i = 0; i < 8; i++) { | ||
597 | nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); | ||
598 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); | ||
599 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, | ||
600 | nv_rd32(dev, 0x100300 + i * 4)); | ||
601 | } | ||
602 | nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); | 623 | nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); |
603 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); | 624 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); |
604 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); | 625 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); |
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev) | |||
642 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 663 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
643 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 664 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
644 | 665 | ||
666 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); | ||
667 | nouveau_irq_unregister(dev, 12); | ||
668 | |||
645 | nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); | 669 | nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); |
646 | } | 670 | } |
647 | 671 | ||
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev) | |||
684 | return ret; | 708 | return ret; |
685 | } | 709 | } |
686 | 710 | ||
711 | ret = nv30_graph_register(dev); | ||
712 | if (ret) { | ||
713 | nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); | ||
714 | return ret; | ||
715 | } | ||
716 | |||
687 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, | 717 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, |
688 | pgraph->ctx_table->pinst >> 4); | 718 | pgraph->ctx_table->pinst >> 4); |
689 | 719 | ||
720 | nouveau_irq_register(dev, 12, nv20_graph_isr); | ||
690 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); | 721 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); |
691 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); | 722 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); |
692 | 723 | ||
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev) | |||
724 | 755 | ||
725 | /* Turn all the tiling regions off. */ | 756 | /* Turn all the tiling regions off. */ |
726 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) | 757 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
727 | nv20_graph_set_region_tiling(dev, i, 0, 0, 0); | 758 | nv20_graph_set_tile_region(dev, i); |
728 | 759 | ||
729 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); | 760 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); |
730 | nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); | 761 | nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); |
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev) | |||
744 | return 0; | 775 | return 0; |
745 | } | 776 | } |
746 | 777 | ||
747 | struct nouveau_pgraph_object_class nv20_graph_grclass[] = { | 778 | static int |
748 | { 0x0030, false, NULL }, /* null */ | 779 | nv20_graph_register(struct drm_device *dev) |
749 | { 0x0039, false, NULL }, /* m2mf */ | 780 | { |
750 | { 0x004a, false, NULL }, /* gdirect */ | 781 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
751 | { 0x009f, false, NULL }, /* imageblit (nv12) */ | 782 | |
752 | { 0x008a, false, NULL }, /* ifc */ | 783 | if (dev_priv->engine.graph.registered) |
753 | { 0x0089, false, NULL }, /* sifm */ | 784 | return 0; |
754 | { 0x0062, false, NULL }, /* surf2d */ | 785 | |
755 | { 0x0043, false, NULL }, /* rop */ | 786 | NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ |
756 | { 0x0012, false, NULL }, /* beta1 */ | 787 | NVOBJ_CLASS(dev, 0x0030, GR); /* null */ |
757 | { 0x0072, false, NULL }, /* beta4 */ | 788 | NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ |
758 | { 0x0019, false, NULL }, /* cliprect */ | 789 | NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ |
759 | { 0x0044, false, NULL }, /* pattern */ | 790 | NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ |
760 | { 0x009e, false, NULL }, /* swzsurf */ | 791 | NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ |
761 | { 0x0096, false, NULL }, /* celsius */ | 800 | NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */ |
762 | { 0x0097, false, NULL }, /* kelvin (nv20) */ | 793 | NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ |
763 | { 0x0597, false, NULL }, /* kelvin (nv25) */ | 794 | NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ |
764 | {} | 795 | NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ |
765 | }; | 796 | NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ |
766 | 797 | NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ | |
767 | struct nouveau_pgraph_object_class nv30_graph_grclass[] = { | 798 | NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ |
768 | { 0x0030, false, NULL }, /* null */ | 799 | NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */ |
769 | { 0x0039, false, NULL }, /* m2mf */ | 800 | NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */ |
770 | { 0x004a, false, NULL }, /* gdirect */ | 801 | |
771 | { 0x009f, false, NULL }, /* imageblit (nv12) */ | 802 | /* kelvin */ |
772 | { 0x008a, false, NULL }, /* ifc */ | 803 | if (dev_priv->chipset < 0x25) |
773 | { 0x038a, false, NULL }, /* ifc (nv30) */ | 804 | NVOBJ_CLASS(dev, 0x0097, GR); |
774 | { 0x0089, false, NULL }, /* sifm */ | 805 | else |
775 | { 0x0389, false, NULL }, /* sifm (nv30) */ | 806 | NVOBJ_CLASS(dev, 0x0597, GR); |
776 | { 0x0062, false, NULL }, /* surf2d */ | 807 | |
777 | { 0x0362, false, NULL }, /* surf2d (nv30) */ | 808 | /* nvsw */ |
778 | { 0x0043, false, NULL }, /* rop */ | 809 | NVOBJ_CLASS(dev, 0x506e, SW); |
779 | { 0x0012, false, NULL }, /* beta1 */ | 810 | NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); |
780 | { 0x0072, false, NULL }, /* beta4 */ | 811 | |
781 | { 0x0019, false, NULL }, /* cliprect */ | 812 | dev_priv->engine.graph.registered = true; |
782 | { 0x0044, false, NULL }, /* pattern */ | 813 | return 0; |
783 | { 0x039e, false, NULL }, /* swzsurf */ | 814 | } |
784 | { 0x0397, false, NULL }, /* rankine (nv30) */ | 815 | |
785 | { 0x0497, false, NULL }, /* rankine (nv35) */ | 816 | static int |
786 | { 0x0697, false, NULL }, /* rankine (nv34) */ | 817 | nv30_graph_register(struct drm_device *dev) |
787 | {} | 818 | { |
788 | }; | 819 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
789 | 820 | ||
821 | if (dev_priv->engine.graph.registered) | ||
822 | return 0; | ||
823 | |||
824 | NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ | ||
825 | NVOBJ_CLASS(dev, 0x0030, GR); /* null */ | ||
826 | NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ | ||
827 | NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ | ||
828 | NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ | ||
829 | NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ | ||
830 | NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */ | ||
831 | NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ | ||
832 | NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */ | ||
833 | NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ | ||
834 | NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */ | ||
835 | NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ | ||
836 | NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ | ||
837 | NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ | ||
838 | NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ | ||
839 | NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ | ||
840 | NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */ | ||
841 | |||
842 | /* rankine */ | ||
843 | if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) | ||
844 | NVOBJ_CLASS(dev, 0x0397, GR); | ||
845 | else | ||
846 | if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) | ||
847 | NVOBJ_CLASS(dev, 0x0697, GR); | ||
848 | else | ||
849 | if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) | ||
850 | NVOBJ_CLASS(dev, 0x0497, GR); | ||
851 | |||
852 | /* nvsw */ | ||
853 | NVOBJ_CLASS(dev, 0x506e, SW); | ||
854 | NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); | ||
855 | |||
856 | dev_priv->engine.graph.registered = true; | ||
857 | return 0; | ||
858 | } | ||
859 | |||
860 | static void | ||
861 | nv20_graph_isr(struct drm_device *dev) | ||
862 | { | ||
863 | u32 stat; | ||
864 | |||
865 | while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { | ||
866 | u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | ||
867 | u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); | ||
868 | u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); | ||
869 | u32 chid = (addr & 0x01f00000) >> 20; | ||
870 | u32 subc = (addr & 0x00070000) >> 16; | ||
871 | u32 mthd = (addr & 0x00001ffc); | ||
872 | u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); | ||
873 | u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; | ||
874 | u32 show = stat; | ||
875 | |||
876 | if (stat & NV_PGRAPH_INTR_ERROR) { | ||
877 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | ||
878 | if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) | ||
879 | show &= ~NV_PGRAPH_INTR_ERROR; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | nv_wr32(dev, NV03_PGRAPH_INTR, stat); | ||
884 | nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); | ||
885 | |||
886 | if (show && nouveau_ratelimit()) { | ||
887 | NV_INFO(dev, "PGRAPH -"); | ||
888 | nouveau_bitfield_print(nv10_graph_intr, show); | ||
889 | printk(" nsource:"); | ||
890 | nouveau_bitfield_print(nv04_graph_nsource, nsource); | ||
891 | printk(" nstatus:"); | ||
892 | nouveau_bitfield_print(nv10_graph_nstatus, nstatus); | ||
893 | printk("\n"); | ||
894 | NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " | ||
895 | "mthd 0x%04x data 0x%08x\n", | ||
896 | chid, subc, class, mthd, data); | ||
897 | } | ||
898 | } | ||
899 | } | ||
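nv30_graph_register() above picks the rankine 3D class with a compact idiom: each candidate class owns a bitmask indexed by the low nibble of the chipset id. A runnable walk-through of which class each NV3x chipset resolves to, using the masks exactly as they appear in the hunk:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t rankine_class(int chipset)
{
	if (0x00000003 & (1 << (chipset & 0x0f)))
		return 0x0397;		/* rankine (nv30/nv31) */
	if (0x00000010 & (1 << (chipset & 0x0f)))
		return 0x0697;		/* rankine (nv34) */
	if (0x000001e0 & (1 << (chipset & 0x0f)))
		return 0x0497;		/* rankine (nv35/nv36) */
	return 0;			/* no match in this sketch */
}

int main(void)
{
	int chipsets[] = { 0x30, 0x31, 0x34, 0x35, 0x36 };
	size_t i;

	for (i = 0; i < sizeof(chipsets) / sizeof(chipsets[0]); i++)
		printf("nv%02x -> class 0x%04x\n",
		       chipsets[i], rankine_class(chipsets[i]));
	return 0;
}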
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c index 4a3f2f095128..e0135f0e2144 100644 --- a/drivers/gpu/drm/nouveau/nv30_fb.c +++ b/drivers/gpu/drm/nouveau/nv30_fb.c | |||
@@ -29,6 +29,27 @@ | |||
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_drm.h" | 30 | #include "nouveau_drm.h" |
31 | 31 | ||
32 | void | ||
33 | nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, | ||
34 | uint32_t size, uint32_t pitch, uint32_t flags) | ||
35 | { | ||
36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
37 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
38 | |||
39 | tile->addr = addr | 1; | ||
40 | tile->limit = max(1u, addr + size) - 1; | ||
41 | tile->pitch = pitch; | ||
42 | } | ||
43 | |||
44 | void | ||
45 | nv30_fb_free_tile_region(struct drm_device *dev, int i) | ||
46 | { | ||
47 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
48 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
49 | |||
50 | tile->addr = tile->limit = tile->pitch = 0; | ||
51 | } | ||
52 | |||
32 | static int | 53 | static int |
33 | calc_bias(struct drm_device *dev, int k, int i, int j) | 54 | calc_bias(struct drm_device *dev, int k, int i, int j) |
34 | { | 55 | { |
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev) | |||
65 | 86 | ||
66 | /* Turn all the tiling regions off. */ | 87 | /* Turn all the tiling regions off. */ |
67 | for (i = 0; i < pfb->num_tiles; i++) | 88 | for (i = 0; i < pfb->num_tiles; i++) |
68 | pfb->set_region_tiling(dev, i, 0, 0, 0); | 89 | pfb->set_tile_region(dev, i); |
69 | 90 | ||
70 | /* Init the memory timing regs at 0x10037c/0x1003ac */ | 91 | /* Init the memory timing regs at 0x10037c/0x1003ac */ |
71 | if (dev_priv->chipset == 0x30 || | 92 | if (dev_priv->chipset == 0x30 || |
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c index 3cd07d8d5bd7..f3d9c0505f7b 100644 --- a/drivers/gpu/drm/nouveau/nv40_fb.c +++ b/drivers/gpu/drm/nouveau/nv40_fb.c | |||
@@ -4,26 +4,22 @@ | |||
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | void | 6 | void |
7 | nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | 7 | nv40_fb_set_tile_region(struct drm_device *dev, int i) |
8 | uint32_t size, uint32_t pitch) | ||
9 | { | 8 | { |
10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 9 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
11 | uint32_t limit = max(1u, addr + size) - 1; | 10 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
12 | |||
13 | if (pitch) | ||
14 | addr |= 1; | ||
15 | 11 | ||
16 | switch (dev_priv->chipset) { | 12 | switch (dev_priv->chipset) { |
17 | case 0x40: | 13 | case 0x40: |
18 | nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); | 14 | nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); |
19 | nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); | 15 | nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); |
20 | nv_wr32(dev, NV10_PFB_TILE(i), addr); | 16 | nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); |
21 | break; | 17 | break; |
22 | 18 | ||
23 | default: | 19 | default: |
24 | nv_wr32(dev, NV40_PFB_TLIMIT(i), limit); | 20 | nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit); |
25 | nv_wr32(dev, NV40_PFB_TSIZE(i), pitch); | 21 | nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch); |
26 | nv_wr32(dev, NV40_PFB_TILE(i), addr); | 22 | nv_wr32(dev, NV40_PFB_TILE(i), tile->addr); |
27 | break; | 23 | break; |
28 | } | 24 | } |
29 | } | 25 | } |
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev) | |||
64 | 60 | ||
65 | /* Turn all the tiling regions off. */ | 61 | /* Turn all the tiling regions off. */ |
66 | for (i = 0; i < pfb->num_tiles; i++) | 62 | for (i = 0; i < pfb->num_tiles; i++) |
67 | pfb->set_region_tiling(dev, i, 0, 0, 0); | 63 | pfb->set_tile_region(dev, i); |
68 | 64 | ||
69 | return 0; | 65 | return 0; |
70 | } | 66 | } |
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index d337b8b28cdd..c86e4d4e9b96 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c | |||
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan) | |||
47 | if (ret) | 47 | if (ret) |
48 | return ret; | 48 | return ret; |
49 | 49 | ||
50 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
51 | NV40_USER(chan->id), PAGE_SIZE); | ||
52 | if (!chan->user) | ||
53 | return -ENOMEM; | ||
54 | |||
50 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 55 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
51 | 56 | ||
52 | nv_wi32(dev, fc + 0, chan->pushbuf_base); | 57 | nv_wi32(dev, fc + 0, chan->pushbuf_base); |
@@ -70,17 +75,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) | |||
70 | return 0; | 75 | return 0; |
71 | } | 76 | } |
72 | 77 | ||
73 | void | ||
74 | nv40_fifo_destroy_context(struct nouveau_channel *chan) | ||
75 | { | ||
76 | struct drm_device *dev = chan->dev; | ||
77 | |||
78 | nv_wr32(dev, NV04_PFIFO_MODE, | ||
79 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); | ||
80 | |||
81 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | ||
82 | } | ||
83 | |||
84 | static void | 78 | static void |
85 | nv40_fifo_do_load_context(struct drm_device *dev, int chid) | 79 | nv40_fifo_do_load_context(struct drm_device *dev, int chid) |
86 | { | 80 | { |
@@ -279,6 +273,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) | |||
279 | static void | 273 | static void |
280 | nv40_fifo_init_intr(struct drm_device *dev) | 274 | nv40_fifo_init_intr(struct drm_device *dev) |
281 | { | 275 | { |
276 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
282 | nv_wr32(dev, 0x002100, 0xffffffff); | 277 | nv_wr32(dev, 0x002100, 0xffffffff); |
283 | nv_wr32(dev, 0x002140, 0xffffffff); | 278 | nv_wr32(dev, 0x002140, 0xffffffff); |
284 | } | 279 | } |
@@ -301,7 +296,7 @@ nv40_fifo_init(struct drm_device *dev) | |||
301 | pfifo->reassign(dev, true); | 296 | pfifo->reassign(dev, true); |
302 | 297 | ||
303 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 298 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
304 | if (dev_priv->fifos[i]) { | 299 | if (dev_priv->channels.ptr[i]) { |
305 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); | 300 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); |
306 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); | 301 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); |
307 | } | 302 | } |
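As on NV10, the per-chip nv40_fifo_destroy_context() is dropped and channel creation now maps its own USER page. The surviving init loop re-enables DMA mode for every live channel with a read-modify-write on NV04_PFIFO_MODE, one bit per channel id; the toy below models that register as a plain variable to show the resulting bit pattern.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pfifo_mode = 0;	/* stand-in for NV04_PFIFO_MODE */
	int live[] = { 0, 3, 7 };	/* channels with a non-NULL ptr */
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t mode = pfifo_mode;		/* nv_rd32() */
		pfifo_mode = mode | (1u << live[i]);	/* nv_wr32() */
	}

	printf("PFIFO_MODE = 0x%08x\n", pfifo_mode);	/* 0x00000089 */
	return 0;
}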
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 7ee1b91569b8..0618846a97ce 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -29,6 +29,9 @@ | |||
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_grctx.h" | 30 | #include "nouveau_grctx.h" |
31 | 31 | ||
32 | static int nv40_graph_register(struct drm_device *); | ||
33 | static void nv40_graph_isr(struct drm_device *); | ||
34 | |||
32 | struct nouveau_channel * | 35 | struct nouveau_channel * |
33 | nv40_graph_channel(struct drm_device *dev) | 36 | nv40_graph_channel(struct drm_device *dev) |
34 | { | 37 | { |
@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev) | |||
42 | inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; | 45 | inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; |
43 | 46 | ||
44 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 47 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
45 | struct nouveau_channel *chan = dev_priv->fifos[i]; | 48 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; |
46 | 49 | ||
47 | if (chan && chan->ramin_grctx && | 50 | if (chan && chan->ramin_grctx && |
48 | chan->ramin_grctx->pinst == inst) | 51 | chan->ramin_grctx->pinst == inst) |
@@ -79,6 +82,22 @@ nv40_graph_create_context(struct nouveau_channel *chan) | |||
79 | void | 82 | void |
80 | nv40_graph_destroy_context(struct nouveau_channel *chan) | 83 | nv40_graph_destroy_context(struct nouveau_channel *chan) |
81 | { | 84 | { |
85 | struct drm_device *dev = chan->dev; | ||
86 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
87 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
88 | unsigned long flags; | ||
89 | |||
90 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
91 | pgraph->fifo_access(dev, false); | ||
92 | |||
93 | /* Unload the context if it's the currently active one */ | ||
94 | if (pgraph->channel(dev) == chan) | ||
95 | pgraph->unload_context(dev); | ||
96 | |||
97 | pgraph->fifo_access(dev, true); | ||
98 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
99 | |||
100 | /* Free the context resources */ | ||
82 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); | 101 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); |
83 | } | 102 | } |
84 | 103 | ||
@@ -174,43 +193,39 @@ nv40_graph_unload_context(struct drm_device *dev) | |||
174 | } | 193 | } |
175 | 194 | ||
176 | void | 195 | void |
177 | nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | 196 | nv40_graph_set_tile_region(struct drm_device *dev, int i) |
178 | uint32_t size, uint32_t pitch) | ||
179 | { | 197 | { |
180 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 198 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
181 | uint32_t limit = max(1u, addr + size) - 1; | 199 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
182 | |||
183 | if (pitch) | ||
184 | addr |= 1; | ||
185 | 200 | ||
186 | switch (dev_priv->chipset) { | 201 | switch (dev_priv->chipset) { |
187 | case 0x44: | 202 | case 0x44: |
188 | case 0x4a: | 203 | case 0x4a: |
189 | case 0x4e: | 204 | case 0x4e: |
190 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); | 205 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
191 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); | 206 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
192 | nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); | 207 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
193 | break; | 208 | break; |
194 | 209 | ||
195 | case 0x46: | 210 | case 0x46: |
196 | case 0x47: | 211 | case 0x47: |
197 | case 0x49: | 212 | case 0x49: |
198 | case 0x4b: | 213 | case 0x4b: |
199 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch); | 214 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); |
200 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit); | 215 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); |
201 | nv_wr32(dev, NV47_PGRAPH_TILE(i), addr); | 216 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); |
202 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); | 217 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); |
203 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); | 218 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); |
204 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); | 219 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); |
205 | break; | 220 | break; |
206 | 221 | ||
207 | default: | 222 | default: |
208 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); | 223 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
209 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); | 224 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
210 | nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); | 225 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
211 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); | 226 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); |
212 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); | 227 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); |
213 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); | 228 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); |
214 | break; | 229 | break; |
215 | } | 230 | } |
216 | } | 231 | } |
@@ -232,7 +247,7 @@ nv40_graph_init(struct drm_device *dev) | |||
232 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | 247 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
233 | struct nouveau_grctx ctx = {}; | 248 | struct nouveau_grctx ctx = {}; |
234 | uint32_t vramsz, *cp; | 249 | uint32_t vramsz, *cp; |
235 | int i, j; | 250 | int ret, i, j; |
236 | 251 | ||
237 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & | 252 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & |
238 | ~NV_PMC_ENABLE_PGRAPH); | 253 | ~NV_PMC_ENABLE_PGRAPH); |
@@ -256,9 +271,14 @@ nv40_graph_init(struct drm_device *dev) | |||
256 | 271 | ||
257 | kfree(cp); | 272 | kfree(cp); |
258 | 273 | ||
274 | ret = nv40_graph_register(dev); | ||
275 | if (ret) | ||
276 | return ret; | ||
277 | |||
259 | /* No context present currently */ | 278 | /* No context present currently */ |
260 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); | 279 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); |
261 | 280 | ||
281 | nouveau_irq_register(dev, 12, nv40_graph_isr); | ||
262 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); | 282 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); |
263 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); | 283 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); |
264 | 284 | ||
@@ -347,7 +367,7 @@ nv40_graph_init(struct drm_device *dev) | |||
347 | 367 | ||
348 | /* Turn all the tiling regions off. */ | 368 | /* Turn all the tiling regions off. */ |
349 | for (i = 0; i < pfb->num_tiles; i++) | 369 | for (i = 0; i < pfb->num_tiles; i++) |
350 | nv40_graph_set_region_tiling(dev, i, 0, 0, 0); | 370 | nv40_graph_set_tile_region(dev, i); |
351 | 371 | ||
352 | /* begin RAM config */ | 372 | /* begin RAM config */ |
353 | vramsz = pci_resource_len(dev->pdev, 0) - 1; | 373 | vramsz = pci_resource_len(dev->pdev, 0) - 1; |
@@ -390,26 +410,111 @@ nv40_graph_init(struct drm_device *dev) | |||
390 | 410 | ||
391 | void nv40_graph_takedown(struct drm_device *dev) | 411 | void nv40_graph_takedown(struct drm_device *dev) |
392 | { | 412 | { |
413 | nouveau_irq_unregister(dev, 12); | ||
393 | } | 414 | } |
394 | 415 | ||
395 | struct nouveau_pgraph_object_class nv40_graph_grclass[] = { | 416 | static int |
396 | { 0x0030, false, NULL }, /* null */ | 417 | nv40_graph_register(struct drm_device *dev) |
397 | { 0x0039, false, NULL }, /* m2mf */ | 418 | { |
398 | { 0x004a, false, NULL }, /* gdirect */ | 419 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
399 | { 0x009f, false, NULL }, /* imageblit (nv12) */ | ||
400 | { 0x008a, false, NULL }, /* ifc */ | ||
401 | { 0x0089, false, NULL }, /* sifm */ | ||
402 | { 0x3089, false, NULL }, /* sifm (nv40) */ | ||
403 | { 0x0062, false, NULL }, /* surf2d */ | ||
404 | { 0x3062, false, NULL }, /* surf2d (nv40) */ | ||
405 | { 0x0043, false, NULL }, /* rop */ | ||
406 | { 0x0012, false, NULL }, /* beta1 */ | ||
407 | { 0x0072, false, NULL }, /* beta4 */ | ||
408 | { 0x0019, false, NULL }, /* cliprect */ | ||
409 | { 0x0044, false, NULL }, /* pattern */ | ||
410 | { 0x309e, false, NULL }, /* swzsurf */ | ||
411 | { 0x4097, false, NULL }, /* curie (nv40) */ | ||
412 | { 0x4497, false, NULL }, /* curie (nv44) */ | ||
413 | {} | ||
414 | }; | ||
415 | 420 | ||
421 | if (dev_priv->engine.graph.registered) | ||
422 | return 0; | ||
423 | |||
424 | NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ | ||
425 | NVOBJ_CLASS(dev, 0x0030, GR); /* null */ | ||
426 | NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ | ||
427 | NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ | ||
428 | NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ | ||
429 | NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ | ||
430 | NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ | ||
431 | NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */ | ||
432 | NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ | ||
433 | NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */ | ||
434 | NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ | ||
435 | NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ | ||
436 | NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ | ||
437 | NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ | ||
438 | NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ | ||
439 | NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ | ||
440 | |||
441 | /* curie */ | ||
442 | if (dev_priv->chipset >= 0x60 || | ||
443 | 0x00005450 & (1 << (dev_priv->chipset & 0x0f))) | ||
444 | NVOBJ_CLASS(dev, 0x4497, GR); | ||
445 | else | ||
446 | NVOBJ_CLASS(dev, 0x4097, GR); | ||
447 | |||
448 | /* nvsw */ | ||
449 | NVOBJ_CLASS(dev, 0x506e, SW); | ||
450 | NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); | ||
451 | |||
452 | dev_priv->engine.graph.registered = true; | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static int | ||
457 | nv40_graph_isr_chid(struct drm_device *dev, u32 inst) | ||
458 | { | ||
459 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
460 | struct nouveau_channel *chan; | ||
461 | unsigned long flags; | ||
462 | int i; | ||
463 | |||
464 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
465 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
466 | chan = dev_priv->channels.ptr[i]; | ||
467 | if (!chan || !chan->ramin_grctx) | ||
468 | continue; | ||
469 | |||
470 | if (inst == chan->ramin_grctx->pinst) | ||
471 | break; | ||
472 | } | ||
473 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
474 | return i; | ||
475 | } | ||
476 | |||
477 | static void | ||
478 | nv40_graph_isr(struct drm_device *dev) | ||
479 | { | ||
480 | u32 stat; | ||
481 | |||
482 | while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { | ||
483 | u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | ||
484 | u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); | ||
485 | u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4; | ||
486 | u32 chid = nv40_graph_isr_chid(dev, inst); | ||
487 | u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); | ||
488 | u32 subc = (addr & 0x00070000) >> 16; | ||
489 | u32 mthd = (addr & 0x00001ffc); | ||
490 | u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); | ||
491 | u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff; | ||
492 | u32 show = stat; | ||
493 | |||
494 | if (stat & NV_PGRAPH_INTR_ERROR) { | ||
495 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | ||
496 | if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) | ||
497 | show &= ~NV_PGRAPH_INTR_ERROR; | ||
498 | } else | ||
499 | if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { | ||
500 | nv_mask(dev, 0x402000, 0, 0); | ||
501 | } | ||
502 | } | ||
503 | |||
504 | nv_wr32(dev, NV03_PGRAPH_INTR, stat); | ||
505 | nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); | ||
506 | |||
507 | if (show && nouveau_ratelimit()) { | ||
508 | NV_INFO(dev, "PGRAPH -"); | ||
509 | nouveau_bitfield_print(nv10_graph_intr, show); | ||
510 | printk(" nsource:"); | ||
511 | nouveau_bitfield_print(nv04_graph_nsource, nsource); | ||
512 | printk(" nstatus:"); | ||
513 | nouveau_bitfield_print(nv10_graph_nstatus, nstatus); | ||
514 | printk("\n"); | ||
515 | NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d " | ||
516 | "class 0x%04x mthd 0x%04x data 0x%08x\n", | ||
517 | chid, inst, subc, class, mthd, data); | ||
518 | } | ||
519 | } | ||
520 | } | ||
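Unlike the NV10 path, NV40 PGRAPH reports the instance address of the active graphics context rather than a channel id, so the new nv40_graph_isr_chid() walks the channel table to translate one into the other. The sketch below reproduces that scan in user-space; locking is elided here, whereas the real code holds dev_priv->channels.lock for the duration.

#include <stdio.h>
#include <stdint.h>

#define NCHAN 4

struct channel { uint32_t grctx_pinst; };	/* cut-down nouveau_channel */

static int isr_chid(struct channel *chans[NCHAN], uint32_t inst)
{
	int i;

	for (i = 0; i < NCHAN; i++) {
		if (!chans[i])
			continue;		/* empty slot */
		if (chans[i]->grctx_pinst == inst)
			break;			/* owner found */
	}
	return i;	/* i == NCHAN means no channel owns this context */
}

int main(void)
{
	struct channel c1 = { 0x2000 }, c2 = { 0x3000 };
	struct channel *chans[NCHAN] = { NULL, &c1, &c2, NULL };

	printf("inst 0x3000 -> ch %d\n", isr_chid(chans, 0x3000));	/* 2 */
	printf("inst 0x4000 -> ch %d\n", isr_chid(chans, 0x4000));	/* 4 */
	return 0;
}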
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 56476d0c6de8..2c346f797285 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -345,7 +345,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
345 | uint32_t buffer_handle, uint32_t width, uint32_t height) | 345 | uint32_t buffer_handle, uint32_t width, uint32_t height) |
346 | { | 346 | { |
347 | struct drm_device *dev = crtc->dev; | 347 | struct drm_device *dev = crtc->dev; |
348 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
349 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 348 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
350 | struct nouveau_bo *cursor = NULL; | 349 | struct nouveau_bo *cursor = NULL; |
351 | struct drm_gem_object *gem; | 350 | struct drm_gem_object *gem; |
@@ -374,8 +373,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
374 | 373 | ||
375 | nouveau_bo_unmap(cursor); | 374 | nouveau_bo_unmap(cursor); |
376 | 375 | ||
377 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset - | 376 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); |
378 | dev_priv->vm_vram_base); | ||
379 | nv_crtc->cursor.show(nv_crtc, true); | 377 | nv_crtc->cursor.show(nv_crtc, true); |
380 | 378 | ||
381 | out: | 379 | out: |
@@ -437,6 +435,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = { | |||
437 | .cursor_move = nv50_crtc_cursor_move, | 435 | .cursor_move = nv50_crtc_cursor_move, |
438 | .gamma_set = nv50_crtc_gamma_set, | 436 | .gamma_set = nv50_crtc_gamma_set, |
439 | .set_config = drm_crtc_helper_set_config, | 437 | .set_config = drm_crtc_helper_set_config, |
438 | .page_flip = nouveau_crtc_page_flip, | ||
440 | .destroy = nv50_crtc_destroy, | 439 | .destroy = nv50_crtc_destroy, |
441 | }; | 440 | }; |
442 | 441 | ||
@@ -453,6 +452,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
453 | 452 | ||
454 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | 453 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
455 | 454 | ||
455 | drm_vblank_pre_modeset(dev, nv_crtc->index); | ||
456 | nv50_crtc_blank(nv_crtc, true); | 456 | nv50_crtc_blank(nv_crtc, true); |
457 | } | 457 | } |
458 | 458 | ||
@@ -468,6 +468,7 @@ nv50_crtc_commit(struct drm_crtc *crtc) | |||
468 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | 468 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
469 | 469 | ||
470 | nv50_crtc_blank(nv_crtc, false); | 470 | nv50_crtc_blank(nv_crtc, false); |
471 | drm_vblank_post_modeset(dev, nv_crtc->index); | ||
471 | 472 | ||
472 | ret = RING_SPACE(evo, 2); | 473 | ret = RING_SPACE(evo, 2); |
473 | if (ret) { | 474 | if (ret) { |
@@ -545,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
545 | return -EINVAL; | 546 | return -EINVAL; |
546 | } | 547 | } |
547 | 548 | ||
548 | nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; | 549 | nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; |
549 | nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); | 550 | nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); |
550 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; | 551 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; |
551 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { | 552 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { |
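
Both hunks above stop deriving the scanout offset from a GPU virtual address (bo.offset - vm_vram_base) and instead take the raw VRAM node start, which TTM keeps in page units, shifted up to bytes. A small sketch of that conversion, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed 4 KiB pages */

    /* TTM's bo.mem.start counts pages; scanout wants a VRAM byte offset. */
    static uint64_t vram_byte_offset(uint64_t mem_start_pages)
    {
        return mem_start_pages << PAGE_SHIFT;
    }

    int main(void)
    {
        /* a buffer whose node starts at page 0x100 sits at byte 0x100000 */
        printf("0x%llx\n", (unsigned long long)vram_byte_offset(0x100));
        return 0;
    }
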
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index f624c611ddea..7cc94ed9ed95 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include "nouveau_ramht.h" | 33 | #include "nouveau_ramht.h" |
34 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
35 | 35 | ||
36 | static void nv50_display_isr(struct drm_device *); | ||
37 | |||
36 | static inline int | 38 | static inline int |
37 | nv50_sor_nr(struct drm_device *dev) | 39 | nv50_sor_nr(struct drm_device *dev) |
38 | { | 40 | { |
@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev) | |||
46 | return 4; | 48 | return 4; |
47 | } | 49 | } |
48 | 50 | ||
49 | static void | ||
50 | nv50_evo_channel_del(struct nouveau_channel **pchan) | ||
51 | { | ||
52 | struct nouveau_channel *chan = *pchan; | ||
53 | |||
54 | if (!chan) | ||
55 | return; | ||
56 | *pchan = NULL; | ||
57 | |||
58 | nouveau_gpuobj_channel_takedown(chan); | ||
59 | nouveau_bo_unmap(chan->pushbuf_bo); | ||
60 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | ||
61 | |||
62 | if (chan->user) | ||
63 | iounmap(chan->user); | ||
64 | |||
65 | kfree(chan); | ||
66 | } | ||
67 | |||
68 | static int | ||
69 | nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, | ||
70 | uint32_t tile_flags, uint32_t magic_flags, | ||
71 | uint32_t offset, uint32_t limit) | ||
72 | { | ||
73 | struct drm_nouveau_private *dev_priv = evo->dev->dev_private; | ||
74 | struct drm_device *dev = evo->dev; | ||
75 | struct nouveau_gpuobj *obj = NULL; | ||
76 | int ret; | ||
77 | |||
78 | ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); | ||
79 | if (ret) | ||
80 | return ret; | ||
81 | obj->engine = NVOBJ_ENGINE_DISPLAY; | ||
82 | |||
83 | nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); | ||
84 | nv_wo32(obj, 4, limit); | ||
85 | nv_wo32(obj, 8, offset); | ||
86 | nv_wo32(obj, 12, 0x00000000); | ||
87 | nv_wo32(obj, 16, 0x00000000); | ||
88 | if (dev_priv->card_type < NV_C0) | ||
89 | nv_wo32(obj, 20, 0x00010000); | ||
90 | else | ||
91 | nv_wo32(obj, 20, 0x00020000); | ||
92 | dev_priv->engine.instmem.flush(dev); | ||
93 | |||
94 | ret = nouveau_ramht_insert(evo, name, obj); | ||
95 | nouveau_gpuobj_ref(NULL, &obj); | ||
96 | if (ret) { | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static int | ||
104 | nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) | ||
105 | { | ||
106 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
107 | struct nouveau_gpuobj *ramht = NULL; | ||
108 | struct nouveau_channel *chan; | ||
109 | int ret; | ||
110 | |||
111 | chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); | ||
112 | if (!chan) | ||
113 | return -ENOMEM; | ||
114 | *pchan = chan; | ||
115 | |||
116 | chan->id = -1; | ||
117 | chan->dev = dev; | ||
118 | chan->user_get = 4; | ||
119 | chan->user_put = 0; | ||
120 | |||
121 | ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, | ||
122 | NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); | ||
123 | if (ret) { | ||
124 | NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); | ||
125 | nv50_evo_channel_del(pchan); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | ret = drm_mm_init(&chan->ramin_heap, 0, 32768); | ||
130 | if (ret) { | ||
131 | NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); | ||
132 | nv50_evo_channel_del(pchan); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); | ||
137 | if (ret) { | ||
138 | NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); | ||
139 | nv50_evo_channel_del(pchan); | ||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | ret = nouveau_ramht_new(dev, ramht, &chan->ramht); | ||
144 | nouveau_gpuobj_ref(NULL, &ramht); | ||
145 | if (ret) { | ||
146 | nv50_evo_channel_del(pchan); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | if (dev_priv->chipset != 0x50) { | ||
151 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, | ||
152 | 0, 0xffffffff); | ||
153 | if (ret) { | ||
154 | nv50_evo_channel_del(pchan); | ||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | |||
159 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, | ||
160 | 0, 0xffffffff); | ||
161 | if (ret) { | ||
162 | nv50_evo_channel_del(pchan); | ||
163 | return ret; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, | ||
168 | 0, dev_priv->vram_size); | ||
169 | if (ret) { | ||
170 | nv50_evo_channel_del(pchan); | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, | ||
175 | false, true, &chan->pushbuf_bo); | ||
176 | if (ret == 0) | ||
177 | ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); | ||
178 | if (ret) { | ||
179 | NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); | ||
180 | nv50_evo_channel_del(pchan); | ||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | ret = nouveau_bo_map(chan->pushbuf_bo); | ||
185 | if (ret) { | ||
186 | NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); | ||
187 | nv50_evo_channel_del(pchan); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
192 | NV50_PDISPLAY_USER(0), PAGE_SIZE); | ||
193 | if (!chan->user) { | ||
194 | NV_ERROR(dev, "Error mapping EVO control regs.\n"); | ||
195 | nv50_evo_channel_del(pchan); | ||
196 | return -ENOMEM; | ||
197 | } | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | int | 51 | int |
203 | nv50_display_early_init(struct drm_device *dev) | 52 | nv50_display_early_init(struct drm_device *dev) |
204 | { | 53 | { |
@@ -214,17 +63,16 @@ int | |||
214 | nv50_display_init(struct drm_device *dev) | 63 | nv50_display_init(struct drm_device *dev) |
215 | { | 64 | { |
216 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 65 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
217 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
218 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | 66 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; |
219 | struct nouveau_channel *evo = dev_priv->evo; | ||
220 | struct drm_connector *connector; | 67 | struct drm_connector *connector; |
221 | uint32_t val, ram_amount; | 68 | struct nouveau_channel *evo; |
222 | uint64_t start; | ||
223 | int ret, i; | 69 | int ret, i; |
70 | u32 val; | ||
224 | 71 | ||
225 | NV_DEBUG_KMS(dev, "\n"); | 72 | NV_DEBUG_KMS(dev, "\n"); |
226 | 73 | ||
227 | nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); | 74 | nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); |
75 | |||
228 | /* | 76 | /* |
229 | * I think the 0x006101XX range is some kind of main control area | 77 | * I think the 0x006101XX range is some kind of main control area |
230 | * that enables things. | 78 | * that enables things. |
@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev) | |||
240 | val = nv_rd32(dev, 0x0061610c + (i * 0x800)); | 88 | val = nv_rd32(dev, 0x0061610c + (i * 0x800)); |
241 | nv_wr32(dev, 0x0061019c + (i * 0x10), val); | 89 | nv_wr32(dev, 0x0061019c + (i * 0x10), val); |
242 | } | 90 | } |
91 | |||
243 | /* DAC */ | 92 | /* DAC */ |
244 | for (i = 0; i < 3; i++) { | 93 | for (i = 0; i < 3; i++) { |
245 | val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); | 94 | val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); |
246 | nv_wr32(dev, 0x006101d0 + (i * 0x04), val); | 95 | nv_wr32(dev, 0x006101d0 + (i * 0x04), val); |
247 | } | 96 | } |
97 | |||
248 | /* SOR */ | 98 | /* SOR */ |
249 | for (i = 0; i < nv50_sor_nr(dev); i++) { | 99 | for (i = 0; i < nv50_sor_nr(dev); i++) { |
250 | val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); | 100 | val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); |
251 | nv_wr32(dev, 0x006101e0 + (i * 0x04), val); | 101 | nv_wr32(dev, 0x006101e0 + (i * 0x04), val); |
252 | } | 102 | } |
103 | |||
253 | /* EXT */ | 104 | /* EXT */ |
254 | for (i = 0; i < 3; i++) { | 105 | for (i = 0; i < 3; i++) { |
255 | val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); | 106 | val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); |
@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev) | |||
262 | nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); | 113 | nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); |
263 | } | 114 | } |
264 | 115 | ||
265 | /* This used to be in crtc unblank, but seems out of place there. */ | ||
266 | nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); | ||
267 | /* RAM is clamped to 256 MiB. */ | ||
268 | ram_amount = dev_priv->vram_size; | ||
269 | NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); | ||
270 | if (ram_amount > 256*1024*1024) | ||
271 | ram_amount = 256*1024*1024; | ||
272 | nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); | ||
273 | nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000); | ||
274 | nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0); | ||
275 | |||
276 | /* The precise purpose is unknown, I suspect it has something to do | 116 |
277 | * with text mode. | 117 | * with text mode. |
278 | */ | 118 | */ |
@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev) | |||
287 | } | 127 | } |
288 | } | 128 | } |
289 | 129 | ||
290 | /* taken from nv bug #12637, attempts to un-wedge the hw if it's | ||
291 | * stuck in some unspecified state | ||
292 | */ | ||
293 | start = ptimer->read(dev); | ||
294 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00); | ||
295 | while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) { | ||
296 | if ((val & 0x9f0000) == 0x20000) | ||
297 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), | ||
298 | val | 0x800000); | ||
299 | |||
300 | if ((val & 0x3f0000) == 0x30000) | ||
301 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), | ||
302 | val | 0x200000); | ||
303 | |||
304 | if (ptimer->read(dev) - start > 1000000000ULL) { | ||
305 | NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); | ||
306 | NV_ERROR(dev, "0x610200 = 0x%08x\n", val); | ||
307 | return -EBUSY; | ||
308 | } | ||
309 | } | ||
310 | |||
311 | nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); | ||
312 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); | ||
313 | if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), | ||
314 | 0x40000000, 0x40000000)) { | ||
315 | NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); | ||
316 | NV_ERROR(dev, "0x610200 = 0x%08x\n", | ||
317 | nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); | ||
318 | return -EBUSY; | ||
319 | } | ||
320 | |||
321 | for (i = 0; i < 2; i++) { | 130 | for (i = 0; i < 2; i++) { |
322 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); | 131 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); |
323 | if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), | 132 | if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), |
@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev) | |||
341 | } | 150 | } |
342 | } | 151 | } |
343 | 152 | ||
344 | nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); | 153 | nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); |
154 | nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); | ||
155 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000); | ||
156 | nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); | ||
157 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, | ||
158 | NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | | ||
159 | NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | | ||
160 | NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); | ||
161 | |||
162 | /* enable hotplug interrupts */ | ||
163 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
164 | struct nouveau_connector *conn = nouveau_connector(connector); | ||
345 | 165 | ||
346 | /* initialise fifo */ | 166 | if (conn->dcb->gpio_tag == 0xff) |
347 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), | 167 | continue; |
348 | ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | | 168 | |
349 | NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | | 169 | pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); |
350 | NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); | ||
351 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); | ||
352 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); | ||
353 | if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { | ||
354 | NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); | ||
355 | NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); | ||
356 | return -EBUSY; | ||
357 | } | 170 | } |
358 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), | 171 | |
359 | (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | | 172 | ret = nv50_evo_init(dev); |
360 | NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); | ||
361 | nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); | ||
362 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 | | ||
363 | NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); | ||
364 | nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1); | ||
365 | |||
366 | evo->dma.max = (4096/4) - 2; | ||
367 | evo->dma.put = 0; | ||
368 | evo->dma.cur = evo->dma.put; | ||
369 | evo->dma.free = evo->dma.max - evo->dma.cur; | ||
370 | |||
371 | ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); | ||
372 | if (ret) | 173 | if (ret) |
373 | return ret; | 174 | return ret; |
175 | evo = dev_priv->evo; | ||
374 | 176 | ||
375 | for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) | 177 | nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); |
376 | OUT_RING(evo, 0); | ||
377 | 178 | ||
378 | ret = RING_SPACE(evo, 11); | 179 | ret = RING_SPACE(evo, 11); |
379 | if (ret) | 180 | if (ret) |
@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev) | |||
393 | if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) | 194 | if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) |
394 | NV_ERROR(dev, "evo pushbuf stalled\n"); | 195 | NV_ERROR(dev, "evo pushbuf stalled\n"); |
395 | 196 | ||
396 | /* enable clock change interrupts. */ | ||
397 | nv_wr32(dev, 0x610028, 0x00010001); | ||
398 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 | | ||
399 | NV50_PDISPLAY_INTR_EN_CLK_UNK20 | | ||
400 | NV50_PDISPLAY_INTR_EN_CLK_UNK40)); | ||
401 | |||
402 | /* enable hotplug interrupts */ | ||
403 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
404 | struct nouveau_connector *conn = nouveau_connector(connector); | ||
405 | |||
406 | if (conn->dcb->gpio_tag == 0xff) | ||
407 | continue; | ||
408 | |||
409 | pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); | ||
410 | } | ||
411 | 197 | ||
412 | return 0; | 198 | return 0; |
413 | } | 199 | } |
@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev) | |||
452 | } | 238 | } |
453 | } | 239 | } |
454 | 240 | ||
455 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); | 241 | nv50_evo_fini(dev); |
456 | nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); | ||
457 | if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { | ||
458 | NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); | ||
459 | NV_ERROR(dev, "0x610200 = 0x%08x\n", | ||
460 | nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); | ||
461 | } | ||
462 | 242 | ||
463 | for (i = 0; i < 3; i++) { | 243 | for (i = 0; i < 3; i++) { |
464 | if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), | 244 | if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), |
@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev) | |||
470 | } | 250 | } |
471 | 251 | ||
472 | /* disable interrupts. */ | 252 | /* disable interrupts. */ |
473 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); | 253 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); |
474 | 254 | ||
475 | /* disable hotplug interrupts */ | 255 | /* disable hotplug interrupts */ |
476 | nv_wr32(dev, 0xe054, 0xffffffff); | 256 | nv_wr32(dev, 0xe054, 0xffffffff); |
@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev) | |||
508 | 288 | ||
509 | dev->mode_config.fb_base = dev_priv->fb_phys; | 289 | dev->mode_config.fb_base = dev_priv->fb_phys; |
510 | 290 | ||
511 | /* Create EVO channel */ | ||
512 | ret = nv50_evo_channel_new(dev, &dev_priv->evo); | ||
513 | if (ret) { | ||
514 | NV_ERROR(dev, "Error creating EVO channel: %d\n", ret); | ||
515 | return ret; | ||
516 | } | ||
517 | |||
518 | /* Create CRTC objects */ | 291 | /* Create CRTC objects */ |
519 | for (i = 0; i < 2; i++) | 292 | for (i = 0; i < 2; i++) |
520 | nv50_crtc_create(dev, i); | 293 | nv50_crtc_create(dev, i); |
@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev) | |||
557 | } | 330 | } |
558 | } | 331 | } |
559 | 332 | ||
333 | INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); | ||
334 | nouveau_irq_register(dev, 26, nv50_display_isr); | ||
335 | |||
560 | ret = nv50_display_init(dev); | 336 | ret = nv50_display_init(dev); |
561 | if (ret) { | 337 | if (ret) { |
562 | nv50_display_destroy(dev); | 338 | nv50_display_destroy(dev); |
@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev) | |||
569 | void | 345 | void |
570 | nv50_display_destroy(struct drm_device *dev) | 346 | nv50_display_destroy(struct drm_device *dev) |
571 | { | 347 | { |
572 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
573 | |||
574 | NV_DEBUG_KMS(dev, "\n"); | 348 | NV_DEBUG_KMS(dev, "\n"); |
575 | 349 | ||
576 | drm_mode_config_cleanup(dev); | 350 | drm_mode_config_cleanup(dev); |
577 | 351 | ||
578 | nv50_display_disable(dev); | 352 | nv50_display_disable(dev); |
579 | nv50_evo_channel_del(&dev_priv->evo); | 353 | nouveau_irq_unregister(dev, 26); |
580 | } | 354 | } |
581 | 355 | ||
582 | static u16 | 356 | static u16 |
@@ -660,32 +434,32 @@ static void | |||
660 | nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) | 434 | nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) |
661 | { | 435 | { |
662 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 436 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
663 | struct nouveau_channel *chan; | 437 | struct nouveau_channel *chan, *tmp; |
664 | struct list_head *entry, *tmp; | ||
665 | 438 | ||
666 | list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { | 439 | list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting, |
667 | chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); | 440 | nvsw.vbl_wait) { |
441 | if (chan->nvsw.vblsem_head != crtc) | ||
442 | continue; | ||
668 | 443 | ||
669 | nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, | 444 | nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, |
670 | chan->nvsw.vblsem_rval); | 445 | chan->nvsw.vblsem_rval); |
671 | list_del(&chan->nvsw.vbl_wait); | 446 | list_del(&chan->nvsw.vbl_wait); |
447 | drm_vblank_put(dev, crtc); | ||
672 | } | 448 | } |
449 | |||
450 | drm_handle_vblank(dev, crtc); | ||
673 | } | 451 | } |
674 | 452 | ||
675 | static void | 453 | static void |
676 | nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) | 454 | nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) |
677 | { | 455 | { |
678 | intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; | ||
679 | |||
680 | if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) | 456 | if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) |
681 | nv50_display_vblank_crtc_handler(dev, 0); | 457 | nv50_display_vblank_crtc_handler(dev, 0); |
682 | 458 | ||
683 | if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) | 459 | if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) |
684 | nv50_display_vblank_crtc_handler(dev, 1); | 460 | nv50_display_vblank_crtc_handler(dev, 1); |
685 | 461 | ||
686 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, | 462 | nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC); |
687 | NV50_PDISPLAY_INTR_EN) & ~intr); | ||
688 | nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr); | ||
689 | } | 463 | } |
690 | 464 | ||
691 | static void | 465 | static void |
@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work) | |||
1011 | static void | 785 | static void |
1012 | nv50_display_error_handler(struct drm_device *dev) | 786 | nv50_display_error_handler(struct drm_device *dev) |
1013 | { | 787 | { |
1014 | uint32_t addr, data; | 788 | u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; |
1015 | 789 | u32 addr, data; | |
1016 | nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); | 790 | int chid; |
1017 | addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR); | ||
1018 | data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA); | ||
1019 | |||
1020 | NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n", | ||
1021 | 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); | ||
1022 | |||
1023 | nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); | ||
1024 | } | ||
1025 | |||
1026 | void | ||
1027 | nv50_display_irq_hotplug_bh(struct work_struct *work) | ||
1028 | { | ||
1029 | struct drm_nouveau_private *dev_priv = | ||
1030 | container_of(work, struct drm_nouveau_private, hpd_work); | ||
1031 | struct drm_device *dev = dev_priv->dev; | ||
1032 | struct drm_connector *connector; | ||
1033 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | ||
1034 | uint32_t unplug_mask, plug_mask, change_mask; | ||
1035 | uint32_t hpd0, hpd1; | ||
1036 | |||
1037 | spin_lock_irq(&dev_priv->hpd_state.lock); | ||
1038 | hpd0 = dev_priv->hpd_state.hpd0_bits; | ||
1039 | dev_priv->hpd_state.hpd0_bits = 0; | ||
1040 | hpd1 = dev_priv->hpd_state.hpd1_bits; | ||
1041 | dev_priv->hpd_state.hpd1_bits = 0; | ||
1042 | spin_unlock_irq(&dev_priv->hpd_state.lock); | ||
1043 | |||
1044 | hpd0 &= nv_rd32(dev, 0xe050); | ||
1045 | if (dev_priv->chipset >= 0x90) | ||
1046 | hpd1 &= nv_rd32(dev, 0xe070); | ||
1047 | |||
1048 | plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); | ||
1049 | unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); | ||
1050 | change_mask = plug_mask | unplug_mask; | ||
1051 | |||
1052 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1053 | struct drm_encoder_helper_funcs *helper; | ||
1054 | struct nouveau_connector *nv_connector = | ||
1055 | nouveau_connector(connector); | ||
1056 | struct nouveau_encoder *nv_encoder; | ||
1057 | struct dcb_gpio_entry *gpio; | ||
1058 | uint32_t reg; | ||
1059 | bool plugged; | ||
1060 | |||
1061 | if (!nv_connector->dcb) | ||
1062 | continue; | ||
1063 | 791 | ||
1064 | gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); | 792 | for (chid = 0; chid < 5; chid++) { |
1065 | if (!gpio || !(change_mask & (1 << gpio->line))) | 793 | if (!(channels & (1 << chid))) |
1066 | continue; | 794 | continue; |
1067 | 795 | ||
1068 | reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); | 796 | nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); |
1069 | plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); | 797 | addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid)); |
1070 | NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", | 798 | data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid)); |
1071 | drm_get_connector_name(connector)) ; | 799 | NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x " |
1072 | 800 | "(0x%04x 0x%02x)\n", chid, | |
1073 | if (!connector->encoder || !connector->encoder->crtc || | 801 | addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); |
1074 | !connector->encoder->crtc->enabled) | ||
1075 | continue; | ||
1076 | nv_encoder = nouveau_encoder(connector->encoder); | ||
1077 | helper = connector->encoder->helper_private; | ||
1078 | |||
1079 | if (nv_encoder->dcb->type != OUTPUT_DP) | ||
1080 | continue; | ||
1081 | 802 | ||
1082 | if (plugged) | 803 | nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); |
1083 | helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); | ||
1084 | else | ||
1085 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); | ||
1086 | } | 804 | } |
1087 | |||
1088 | drm_helper_hpd_irq_event(dev); | ||
1089 | } | 805 | } |
1090 | 806 | ||
1091 | void | 807 | static void |
1092 | nv50_display_irq_handler(struct drm_device *dev) | 808 | nv50_display_isr(struct drm_device *dev) |
1093 | { | 809 | { |
1094 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 810 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1095 | uint32_t delayed = 0; | 811 | uint32_t delayed = 0; |
1096 | 812 | ||
1097 | if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { | ||
1098 | uint32_t hpd0_bits, hpd1_bits = 0; | ||
1099 | |||
1100 | hpd0_bits = nv_rd32(dev, 0xe054); | ||
1101 | nv_wr32(dev, 0xe054, hpd0_bits); | ||
1102 | |||
1103 | if (dev_priv->chipset >= 0x90) { | ||
1104 | hpd1_bits = nv_rd32(dev, 0xe074); | ||
1105 | nv_wr32(dev, 0xe074, hpd1_bits); | ||
1106 | } | ||
1107 | |||
1108 | spin_lock(&dev_priv->hpd_state.lock); | ||
1109 | dev_priv->hpd_state.hpd0_bits |= hpd0_bits; | ||
1110 | dev_priv->hpd_state.hpd1_bits |= hpd1_bits; | ||
1111 | spin_unlock(&dev_priv->hpd_state.lock); | ||
1112 | |||
1113 | queue_work(dev_priv->wq, &dev_priv->hpd_work); | ||
1114 | } | ||
1115 | |||
1116 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { | 813 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { |
1117 | uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); | 814 | uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); |
1118 | uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); | 815 | uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); |
@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev) | |||
1123 | if (!intr0 && !(intr1 & ~delayed)) | 820 | if (!intr0 && !(intr1 & ~delayed)) |
1124 | break; | 821 | break; |
1125 | 822 | ||
1126 | if (intr0 & 0x00010000) { | 823 | if (intr0 & 0x001f0000) { |
1127 | nv50_display_error_handler(dev); | 824 | nv50_display_error_handler(dev); |
1128 | intr0 &= ~0x00010000; | 825 | intr0 &= ~0x001f0000; |
1129 | } | 826 | } |
1130 | 827 | ||
1131 | if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { | 828 | if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { |
@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev) | |||
1156 | } | 853 | } |
1157 | } | 854 | } |
1158 | } | 855 | } |
1159 | |||
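
The reworked vblank handler iterates dev_priv->vbl_waiting with list_for_each_entry_safe() because it unlinks entries as it signals them, and it now skips waiters armed for the other CRTC. A stand-alone sketch of the same delete-while-iterating pattern, using a plain singly linked list in place of the kernel's list_head:

    #include <stdlib.h>

    struct waiter {
        int crtc;               /* which CRTC this waiter is armed for */
        struct waiter *next;
    };

    /* Unlink and free every waiter for 'crtc'; the pointer-to-pointer walk
     * stays valid across removals, which is what the _safe iterator buys
     * the kernel code above. */
    static void signal_waiters(struct waiter **head, int crtc)
    {
        struct waiter **pp = head;

        while (*pp) {
            struct waiter *w = *pp;

            if (w->crtc != crtc) {
                pp = &w->next;  /* keep waiters for other CRTCs */
                continue;
            }
            *pp = w->next;      /* unlink, then release */
            free(w);
        }
    }

    int main(void)
    {
        struct waiter *a = malloc(sizeof(*a));
        struct waiter *b = malloc(sizeof(*b));
        struct waiter *head = a;

        a->crtc = 0; a->next = b;
        b->crtc = 1; b->next = NULL;

        signal_waiters(&head, 0);   /* frees a; the crtc-1 waiter stays */

        while (head) {              /* drain what's left */
            struct waiter *next = head->next;
            free(head);
            head = next;
        }
        return 0;
    }
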
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index c551f0b85ee0..f0e30b78ef6b 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h | |||
@@ -35,9 +35,7 @@ | |||
35 | #include "nouveau_crtc.h" | 35 | #include "nouveau_crtc.h" |
36 | #include "nv50_evo.h" | 36 | #include "nv50_evo.h" |
37 | 37 | ||
38 | void nv50_display_irq_handler(struct drm_device *dev); | ||
39 | void nv50_display_irq_handler_bh(struct work_struct *work); | 38 | void nv50_display_irq_handler_bh(struct work_struct *work); |
40 | void nv50_display_irq_hotplug_bh(struct work_struct *work); | ||
41 | int nv50_display_early_init(struct drm_device *dev); | 39 | int nv50_display_early_init(struct drm_device *dev); |
42 | void nv50_display_late_takedown(struct drm_device *dev); | 40 | void nv50_display_late_takedown(struct drm_device *dev); |
43 | int nv50_display_create(struct drm_device *dev); | 41 | int nv50_display_create(struct drm_device *dev); |
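
nv50_display_create() and nv50_display_destroy() above hand the ISR to nouveau_irq_register()/nouveau_irq_unregister() under PMC interrupt bit 26 instead of exporting nv50_display_irq_handler(). A stand-alone sketch of such a bit-indexed dispatch table; the helper names here are illustrative, only the bit-26 slot comes from the diff:

    #include <stdio.h>

    typedef void (*isr_fn)(void *dev);

    static isr_fn handlers[32];

    static void irq_register(int bit, isr_fn fn) { handlers[bit] = fn; }
    static void irq_unregister(int bit)          { handlers[bit] = NULL; }

    /* Call every registered handler whose PMC_INTR bit is set. */
    static void dispatch(void *dev, unsigned int pmc_intr)
    {
        int bit;

        for (bit = 0; bit < 32; bit++)
            if ((pmc_intr & (1u << bit)) && handlers[bit])
                handlers[bit](dev);
    }

    static void display_isr(void *dev)
    {
        (void)dev;
        puts("display interrupt");
    }

    int main(void)
    {
        irq_register(26, display_isr);
        dispatch(NULL, 1u << 26);   /* invokes display_isr */
        irq_unregister(26);
        return 0;
    }
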
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c new file mode 100644 index 000000000000..887b2a97e2a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_evo.c | |||
@@ -0,0 +1,318 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_dma.h" | ||
29 | #include "nouveau_ramht.h" | ||
30 | |||
31 | static void | ||
32 | nv50_evo_channel_del(struct nouveau_channel **pevo) | ||
33 | { | ||
34 | struct drm_nouveau_private *dev_priv; | ||
35 | struct nouveau_channel *evo = *pevo; | ||
36 | |||
37 | if (!evo) | ||
38 | return; | ||
39 | *pevo = NULL; | ||
40 | |||
41 | dev_priv = evo->dev->dev_private; | ||
42 | dev_priv->evo_alloc &= ~(1 << evo->id); | ||
43 | |||
44 | nouveau_gpuobj_channel_takedown(evo); | ||
45 | nouveau_bo_unmap(evo->pushbuf_bo); | ||
46 | nouveau_bo_ref(NULL, &evo->pushbuf_bo); | ||
47 | |||
48 | if (evo->user) | ||
49 | iounmap(evo->user); | ||
50 | |||
51 | kfree(evo); | ||
52 | } | ||
53 | |||
54 | int | ||
55 | nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, | ||
56 | u32 tile_flags, u32 magic_flags, u32 offset, u32 limit) | ||
57 | { | ||
58 | struct drm_nouveau_private *dev_priv = evo->dev->dev_private; | ||
59 | struct drm_device *dev = evo->dev; | ||
60 | struct nouveau_gpuobj *obj = NULL; | ||
61 | int ret; | ||
62 | |||
63 | ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | obj->engine = NVOBJ_ENGINE_DISPLAY; | ||
67 | |||
68 | nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); | ||
69 | nv_wo32(obj, 4, limit); | ||
70 | nv_wo32(obj, 8, offset); | ||
71 | nv_wo32(obj, 12, 0x00000000); | ||
72 | nv_wo32(obj, 16, 0x00000000); | ||
73 | if (dev_priv->card_type < NV_C0) | ||
74 | nv_wo32(obj, 20, 0x00010000); | ||
75 | else | ||
76 | nv_wo32(obj, 20, 0x00020000); | ||
77 | dev_priv->engine.instmem.flush(dev); | ||
78 | |||
79 | ret = nouveau_ramht_insert(evo, name, obj); | ||
80 | nouveau_gpuobj_ref(NULL, &obj); | ||
81 | if (ret) { | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static int | ||
89 | nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo) | ||
90 | { | ||
91 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
92 | struct nouveau_channel *evo; | ||
93 | int ret; | ||
94 | |||
95 | evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); | ||
96 | if (!evo) | ||
97 | return -ENOMEM; | ||
98 | *pevo = evo; | ||
99 | |||
100 | for (evo->id = 0; evo->id < 5; evo->id++) { | ||
101 | if (dev_priv->evo_alloc & (1 << evo->id)) | ||
102 | continue; | ||
103 | |||
104 | dev_priv->evo_alloc |= (1 << evo->id); | ||
105 | break; | ||
106 | } | ||
107 | |||
108 | if (evo->id == 5) { | ||
109 | kfree(evo); | ||
110 | return -ENODEV; | ||
111 | } | ||
112 | |||
113 | evo->dev = dev; | ||
114 | evo->user_get = 4; | ||
115 | evo->user_put = 0; | ||
116 | |||
117 | ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, | ||
118 | false, true, &evo->pushbuf_bo); | ||
119 | if (ret == 0) | ||
120 | ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); | ||
121 | if (ret) { | ||
122 | NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); | ||
123 | nv50_evo_channel_del(pevo); | ||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | ret = nouveau_bo_map(evo->pushbuf_bo); | ||
128 | if (ret) { | ||
129 | NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); | ||
130 | nv50_evo_channel_del(pevo); | ||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | evo->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
135 | NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); | ||
136 | if (!evo->user) { | ||
137 | NV_ERROR(dev, "Error mapping EVO control regs.\n"); | ||
138 | nv50_evo_channel_del(pevo); | ||
139 | return -ENOMEM; | ||
140 | } | ||
141 | |||
142 | /* bind primary evo channel's ramht to the channel */ | ||
143 | if (dev_priv->evo && evo != dev_priv->evo) | ||
144 | nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL); | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int | ||
150 | nv50_evo_channel_init(struct nouveau_channel *evo) | ||
151 | { | ||
152 | struct drm_device *dev = evo->dev; | ||
153 | int id = evo->id, ret, i; | ||
154 | u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; | ||
155 | u32 tmp; | ||
156 | |||
157 | tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); | ||
158 | if ((tmp & 0x009f0000) == 0x00020000) | ||
159 | nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); | ||
160 | |||
161 | tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); | ||
162 | if ((tmp & 0x003f0000) == 0x00030000) | ||
163 | nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); | ||
164 | |||
165 | /* initialise fifo */ | ||
166 | nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | | ||
167 | NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | | ||
168 | NV50_PDISPLAY_EVO_DMA_CB_VALID); | ||
169 | nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); | ||
170 | nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); | ||
171 | nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, | ||
172 | NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); | ||
173 | |||
174 | nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); | ||
175 | nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | | ||
176 | NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); | ||
177 | if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { | ||
178 | NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, | ||
179 | nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); | ||
180 | return -EBUSY; | ||
181 | } | ||
182 | |||
183 | /* enable error reporting on the channel */ | ||
184 | nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); | ||
185 | |||
186 | evo->dma.max = (4096/4) - 2; | ||
187 | evo->dma.put = 0; | ||
188 | evo->dma.cur = evo->dma.put; | ||
189 | evo->dma.free = evo->dma.max - evo->dma.cur; | ||
190 | |||
191 | ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); | ||
192 | if (ret) | ||
193 | return ret; | ||
194 | |||
195 | for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) | ||
196 | OUT_RING(evo, 0); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static void | ||
202 | nv50_evo_channel_fini(struct nouveau_channel *evo) | ||
203 | { | ||
204 | struct drm_device *dev = evo->dev; | ||
205 | int id = evo->id; | ||
206 | |||
207 | nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); | ||
208 | nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); | ||
209 | nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); | ||
210 | nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); | ||
211 | if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { | ||
212 | NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, | ||
213 | nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); | ||
214 | } | ||
215 | } | ||
216 | |||
217 | static int | ||
218 | nv50_evo_create(struct drm_device *dev) | ||
219 | { | ||
220 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
221 | struct nouveau_gpuobj *ramht = NULL; | ||
222 | struct nouveau_channel *evo; | ||
223 | int ret; | ||
224 | |||
225 | /* create primary evo channel, the one we use for modesetting | ||
226 | * purposes | ||
227 | */ | ||
228 | ret = nv50_evo_channel_new(dev, &dev_priv->evo); | ||
229 | if (ret) | ||
230 | return ret; | ||
231 | evo = dev_priv->evo; | ||
232 | |||
233 | /* set up object management on it; any other evo channel will | ||
234 | * also use this, as there's no per-channel support in the | ||
235 | * hardware | ||
236 | */ | ||
237 | ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, | ||
238 | NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); | ||
239 | if (ret) { | ||
240 | NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); | ||
241 | nv50_evo_channel_del(&dev_priv->evo); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | ret = drm_mm_init(&evo->ramin_heap, 0, 32768); | ||
246 | if (ret) { | ||
247 | NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); | ||
248 | nv50_evo_channel_del(&dev_priv->evo); | ||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); | ||
253 | if (ret) { | ||
254 | NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); | ||
255 | nv50_evo_channel_del(&dev_priv->evo); | ||
256 | return ret; | ||
257 | } | ||
258 | |||
259 | ret = nouveau_ramht_new(dev, ramht, &evo->ramht); | ||
260 | nouveau_gpuobj_ref(NULL, &ramht); | ||
261 | if (ret) { | ||
262 | nv50_evo_channel_del(&dev_priv->evo); | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | /* create some default objects for the scanout memtypes we support */ | ||
267 | if (dev_priv->chipset != 0x50) { | ||
268 | ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, | ||
269 | 0, 0xffffffff); | ||
270 | if (ret) { | ||
271 | nv50_evo_channel_del(&dev_priv->evo); | ||
272 | return ret; | ||
273 | } | ||
274 | |||
275 | |||
276 | ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19, | ||
277 | 0, 0xffffffff); | ||
278 | if (ret) { | ||
279 | nv50_evo_channel_del(&dev_priv->evo); | ||
280 | return ret; | ||
281 | } | ||
282 | } | ||
283 | |||
284 | ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, | ||
285 | 0, dev_priv->vram_size); | ||
286 | if (ret) { | ||
287 | nv50_evo_channel_del(&dev_priv->evo); | ||
288 | return ret; | ||
289 | } | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | int | ||
295 | nv50_evo_init(struct drm_device *dev) | ||
296 | { | ||
297 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
298 | int ret; | ||
299 | |||
300 | if (!dev_priv->evo) { | ||
301 | ret = nv50_evo_create(dev); | ||
302 | if (ret) | ||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | return nv50_evo_channel_init(dev_priv->evo); | ||
307 | } | ||
308 | |||
309 | void | ||
310 | nv50_evo_fini(struct drm_device *dev) | ||
311 | { | ||
312 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
313 | |||
314 | if (dev_priv->evo) { | ||
315 | nv50_evo_channel_fini(dev_priv->evo); | ||
316 | nv50_evo_channel_del(&dev_priv->evo); | ||
317 | } | ||
318 | } | ||
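
nv50_evo_channel_new() above claims a channel id by scanning the five-bit dev_priv->evo_alloc mask for the first clear bit, and nv50_evo_channel_del() returns the bit. A stand-alone sketch of that allocator; the five-channel bound mirrors the loop above:

    #include <stdio.h>

    #define NUM_EVO_CHANNELS 5

    /* Return the first free id and mark it used, or -1 if all are taken. */
    static int evo_id_alloc(unsigned int *mask)
    {
        int id;

        for (id = 0; id < NUM_EVO_CHANNELS; id++) {
            if (*mask & (1u << id))
                continue;
            *mask |= 1u << id;
            return id;
        }
        return -1;
    }

    static void evo_id_free(unsigned int *mask, int id)
    {
        *mask &= ~(1u << id);
    }

    int main(void)
    {
        unsigned int mask = 0;
        int a = evo_id_alloc(&mask);    /* 0 */
        int b = evo_id_alloc(&mask);    /* 1 */

        evo_id_free(&mask, a);
        printf("%d %d -> next %d\n", a, b, evo_id_alloc(&mask)); /* 0 again */
        return 0;
    }
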
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h index aae13343bcec..aa4f0d3cea8e 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.h +++ b/drivers/gpu/drm/nouveau/nv50_evo.h | |||
@@ -24,6 +24,15 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifndef __NV50_EVO_H__ | ||
28 | #define __NV50_EVO_H__ | ||
29 | |||
30 | int nv50_evo_init(struct drm_device *dev); | ||
31 | void nv50_evo_fini(struct drm_device *dev); | ||
32 | int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name, | ||
33 | u32 tile_flags, u32 magic_flags, | ||
34 | u32 offset, u32 limit); | ||
35 | |||
27 | #define NV50_EVO_UPDATE 0x00000080 | 36 | #define NV50_EVO_UPDATE 0x00000080 |
28 | #define NV50_EVO_UNK84 0x00000084 | 37 | #define NV50_EVO_UNK84 0x00000084 |
29 | #define NV50_EVO_UNK84_NOTIFY 0x40000000 | 38 | #define NV50_EVO_UNK84_NOTIFY 0x40000000 |
@@ -111,3 +120,4 @@ | |||
111 | #define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 | 120 | #define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 |
112 | #define NV50_EVO_CRTC_SCALE_RES2 0x000008dc | 121 | #define NV50_EVO_CRTC_SCALE_RES2 0x000008dc |
113 | 122 | ||
123 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index cd1988b15d2c..50290dea0ac4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c | |||
@@ -3,30 +3,75 @@ | |||
3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | struct nv50_fb_priv { | ||
7 | struct page *r100c08_page; | ||
8 | dma_addr_t r100c08; | ||
9 | }; | ||
10 | |||
11 | static int | ||
12 | nv50_fb_create(struct drm_device *dev) | ||
13 | { | ||
14 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
15 | struct nv50_fb_priv *priv; | ||
16 | |||
17 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
18 | if (!priv) | ||
19 | return -ENOMEM; | ||
20 | |||
21 | priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
22 | if (!priv->r100c08_page) { | ||
23 | kfree(priv); | ||
24 | return -ENOMEM; | ||
25 | } | ||
26 | |||
27 | priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0, | ||
28 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
29 | if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) { | ||
30 | __free_page(priv->r100c08_page); | ||
31 | kfree(priv); | ||
32 | return -EFAULT; | ||
33 | } | ||
34 | |||
35 | dev_priv->engine.fb.priv = priv; | ||
36 | return 0; | ||
37 | } | ||
38 | |||
6 | int | 39 | int |
7 | nv50_fb_init(struct drm_device *dev) | 40 | nv50_fb_init(struct drm_device *dev) |
8 | { | 41 | { |
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 42 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
43 | struct nv50_fb_priv *priv; | ||
44 | int ret; | ||
45 | |||
46 | if (!dev_priv->engine.fb.priv) { | ||
47 | ret = nv50_fb_create(dev); | ||
48 | if (ret) | ||
49 | return ret; | ||
50 | } | ||
51 | priv = dev_priv->engine.fb.priv; | ||
10 | 52 | ||
11 | /* Not a clue what this is exactly. Without pointing it at a | 53 | /* Not a clue what this is exactly. Without pointing it at a |
12 | * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) | 54 | * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) |
13 | * cause IOMMU "read from address 0" errors (rh#561267) | 55 | * cause IOMMU "read from address 0" errors (rh#561267) |
14 | */ | 56 | */ |
15 | nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); | 57 | nv_wr32(dev, 0x100c08, priv->r100c08 >> 8); |
16 | 58 | ||
17 | /* This is needed to get meaningful information from 100c90 | 59 | /* This is needed to get meaningful information from 100c90 |
18 | * on traps. No idea what these values mean exactly. */ | 60 | * on traps. No idea what these values mean exactly. */ |
19 | switch (dev_priv->chipset) { | 61 | switch (dev_priv->chipset) { |
20 | case 0x50: | 62 | case 0x50: |
21 | nv_wr32(dev, 0x100c90, 0x0707ff); | 63 | nv_wr32(dev, 0x100c90, 0x000707ff); |
22 | break; | 64 | break; |
23 | case 0xa3: | 65 | case 0xa3: |
24 | case 0xa5: | 66 | case 0xa5: |
25 | case 0xa8: | 67 | case 0xa8: |
26 | nv_wr32(dev, 0x100c90, 0x0d0fff); | 68 | nv_wr32(dev, 0x100c90, 0x000d0fff); |
69 | break; | ||
70 | case 0xaf: | ||
71 | nv_wr32(dev, 0x100c90, 0x089d1fff); | ||
27 | break; | 72 | break; |
28 | default: | 73 | default: |
29 | nv_wr32(dev, 0x100c90, 0x1d07ff); | 74 | nv_wr32(dev, 0x100c90, 0x001d07ff); |
30 | break; | 75 | break; |
31 | } | 76 | } |
32 | 77 | ||
@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev) | |||
36 | void | 81 | void |
37 | nv50_fb_takedown(struct drm_device *dev) | 82 | nv50_fb_takedown(struct drm_device *dev) |
38 | { | 83 | { |
84 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
85 | struct nv50_fb_priv *priv; | ||
86 | |||
87 | priv = dev_priv->engine.fb.priv; | ||
88 | if (!priv) | ||
89 | return; | ||
90 | dev_priv->engine.fb.priv = NULL; | ||
91 | |||
92 | pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE, | ||
93 | PCI_DMA_BIDIRECTIONAL); | ||
94 | __free_page(priv->r100c08_page); | ||
95 | kfree(priv); | ||
39 | } | 96 | } |
40 | 97 | ||
41 | void | 98 | void |
42 | nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) | 99 | nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) |
43 | { | 100 | { |
44 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 101 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
102 | unsigned long flags; | ||
45 | u32 trap[6], idx, chinst; | 103 | u32 trap[6], idx, chinst; |
46 | int i, ch; | 104 | int i, ch; |
47 | 105 | ||
@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) | |||
60 | return; | 118 | return; |
61 | 119 | ||
62 | chinst = (trap[2] << 16) | trap[1]; | 120 | chinst = (trap[2] << 16) | trap[1]; |
121 | |||
122 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
63 | for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { | 123 | for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { |
64 | struct nouveau_channel *chan = dev_priv->fifos[ch]; | 124 | struct nouveau_channel *chan = dev_priv->channels.ptr[ch]; |
65 | 125 | ||
66 | if (!chan || !chan->ramin) | 126 | if (!chan || !chan->ramin) |
67 | continue; | 127 | continue; |
@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) | |||
69 | if (chinst == chan->ramin->vinst >> 12) | 129 | if (chinst == chan->ramin->vinst >> 12) |
70 | break; | 130 | break; |
71 | } | 131 | } |
132 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
72 | 133 | ||
73 | NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " | 134 | NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " |
74 | "channel %d (0x%08x)\n", | 135 | "channel %d (0x%08x)\n", |
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6dcf048eddbc..6d38cb1488ae 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -3,27 +3,20 @@ | |||
3 | #include "nouveau_dma.h" | 3 | #include "nouveau_dma.h" |
4 | #include "nouveau_ramht.h" | 4 | #include "nouveau_ramht.h" |
5 | #include "nouveau_fbcon.h" | 5 | #include "nouveau_fbcon.h" |
6 | #include "nouveau_mm.h" | ||
6 | 7 | ||
7 | void | 8 | int |
8 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 9 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
9 | { | 10 | { |
10 | struct nouveau_fbdev *nfbdev = info->par; | 11 | struct nouveau_fbdev *nfbdev = info->par; |
11 | struct drm_device *dev = nfbdev->dev; | 12 | struct drm_device *dev = nfbdev->dev; |
12 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 13 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
13 | struct nouveau_channel *chan = dev_priv->channel; | 14 | struct nouveau_channel *chan = dev_priv->channel; |
15 | int ret; | ||
14 | 16 | ||
15 | if (info->state != FBINFO_STATE_RUNNING) | 17 | ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11); |
16 | return; | 18 | if (ret) |
17 | 19 | return ret; | |
18 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && | ||
19 | RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { | ||
20 | nouveau_fbcon_gpu_lockup(info); | ||
21 | } | ||
22 | |||
23 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | ||
24 | cfb_fillrect(info, rect); | ||
25 | return; | ||
26 | } | ||
27 | 20 | ||
28 | if (rect->rop != ROP_COPY) { | 21 | if (rect->rop != ROP_COPY) { |
29 | BEGIN_RING(chan, NvSub2D, 0x02ac, 1); | 22 | BEGIN_RING(chan, NvSub2D, 0x02ac, 1); |
@@ -45,27 +38,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
45 | OUT_RING(chan, 3); | 38 | OUT_RING(chan, 3); |
46 | } | 39 | } |
47 | FIRE_RING(chan); | 40 | FIRE_RING(chan); |
41 | return 0; | ||
48 | } | 42 | } |
49 | 43 | ||
50 | void | 44 | int |
51 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 45 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
52 | { | 46 | { |
53 | struct nouveau_fbdev *nfbdev = info->par; | 47 | struct nouveau_fbdev *nfbdev = info->par; |
54 | struct drm_device *dev = nfbdev->dev; | 48 | struct drm_device *dev = nfbdev->dev; |
55 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 49 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
56 | struct nouveau_channel *chan = dev_priv->channel; | 50 | struct nouveau_channel *chan = dev_priv->channel; |
51 | int ret; | ||
57 | 52 | ||
58 | if (info->state != FBINFO_STATE_RUNNING) | 53 | ret = RING_SPACE(chan, 12); |
59 | return; | 54 | if (ret) |
60 | 55 | return ret; | |
61 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { | ||
62 | nouveau_fbcon_gpu_lockup(info); | ||
63 | } | ||
64 | |||
65 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | ||
66 | cfb_copyarea(info, region); | ||
67 | return; | ||
68 | } | ||
69 | 56 | ||
70 | BEGIN_RING(chan, NvSub2D, 0x0110, 1); | 57 | BEGIN_RING(chan, NvSub2D, 0x0110, 1); |
71 | OUT_RING(chan, 0); | 58 | OUT_RING(chan, 0); |
@@ -80,9 +67,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
80 | OUT_RING(chan, 0); | 67 | OUT_RING(chan, 0); |
81 | OUT_RING(chan, region->sy); | 68 | OUT_RING(chan, region->sy); |
82 | FIRE_RING(chan); | 69 | FIRE_RING(chan); |
70 | return 0; | ||
83 | } | 71 | } |
84 | 72 | ||
85 | void | 73 | int |
86 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 74 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
87 | { | 75 | { |
88 | struct nouveau_fbdev *nfbdev = info->par; | 76 | struct nouveau_fbdev *nfbdev = info->par; |
@@ -92,23 +80,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
92 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 80 | uint32_t width, dwords, *data = (uint32_t *)image->data; |
93 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 81 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
94 | uint32_t *palette = info->pseudo_palette; | 82 | uint32_t *palette = info->pseudo_palette; |
83 | int ret; | ||
95 | 84 | ||
96 | if (info->state != FBINFO_STATE_RUNNING) | 85 | if (image->depth != 1) |
97 | return; | 86 | return -ENODEV; |
98 | |||
99 | if (image->depth != 1) { | ||
100 | cfb_imageblit(info, image); | ||
101 | return; | ||
102 | } | ||
103 | |||
104 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { | ||
105 | nouveau_fbcon_gpu_lockup(info); | ||
106 | } | ||
107 | 87 | ||
108 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 88 | ret = RING_SPACE(chan, 11); |
109 | cfb_imageblit(info, image); | 89 | if (ret) |
110 | return; | 90 | return ret; |
111 | } | ||
112 | 91 | ||
113 | width = ALIGN(image->width, 32); | 92 | width = ALIGN(image->width, 32); |
114 | dwords = (width * image->height) >> 5; | 93 | dwords = (width * image->height) >> 5; |
@@ -134,11 +113,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
134 | while (dwords) { | 113 | while (dwords) { |
135 | int push = dwords > 2047 ? 2047 : dwords; | 114 | int push = dwords > 2047 ? 2047 : dwords; |
136 | 115 | ||
137 | if (RING_SPACE(chan, push + 1)) { | 116 | ret = RING_SPACE(chan, push + 1); |
138 | nouveau_fbcon_gpu_lockup(info); | 117 | if (ret) |
139 | cfb_imageblit(info, image); | 118 | return ret; |
140 | return; | ||
141 | } | ||
142 | 119 | ||
143 | dwords -= push; | 120 | dwords -= push; |
144 | 121 | ||
@@ -148,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
148 | } | 125 | } |
149 | 126 | ||
150 | FIRE_RING(chan); | 127 | FIRE_RING(chan); |
128 | return 0; | ||
151 | } | 129 | } |
152 | 130 | ||
153 | int | 131 | int |
@@ -157,12 +135,9 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
157 | struct drm_device *dev = nfbdev->dev; | 135 | struct drm_device *dev = nfbdev->dev; |
158 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 136 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
159 | struct nouveau_channel *chan = dev_priv->channel; | 137 | struct nouveau_channel *chan = dev_priv->channel; |
160 | struct nouveau_gpuobj *eng2d = NULL; | 138 | struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; |
161 | uint64_t fb; | ||
162 | int ret, format; | 139 | int ret, format; |
163 | 140 | ||
164 | fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; | ||
165 | |||
166 | switch (info->var.bits_per_pixel) { | 141 | switch (info->var.bits_per_pixel) { |
167 | case 8: | 142 | case 8: |
168 | format = 0xf3; | 143 | format = 0xf3; |
@@ -190,12 +165,7 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
190 | return -EINVAL; | 165 | return -EINVAL; |
191 | } | 166 | } |
192 | 167 | ||
193 | ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d); | 168 | ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d); |
194 | if (ret) | ||
195 | return ret; | ||
196 | |||
197 | ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d); | ||
198 | nouveau_gpuobj_ref(NULL, &eng2d); | ||
199 | if (ret) | 169 | if (ret) |
200 | return ret; | 170 | return ret; |
201 | 171 | ||
@@ -253,8 +223,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
253 | OUT_RING(chan, info->fix.line_length); | 223 | OUT_RING(chan, info->fix.line_length); |
254 | OUT_RING(chan, info->var.xres_virtual); | 224 | OUT_RING(chan, info->var.xres_virtual); |
255 | OUT_RING(chan, info->var.yres_virtual); | 225 | OUT_RING(chan, info->var.yres_virtual); |
256 | OUT_RING(chan, upper_32_bits(fb)); | 226 | OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); |
257 | OUT_RING(chan, lower_32_bits(fb)); | 227 | OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); |
258 | BEGIN_RING(chan, NvSub2D, 0x0230, 2); | 228 | BEGIN_RING(chan, NvSub2D, 0x0230, 2); |
259 | OUT_RING(chan, format); | 229 | OUT_RING(chan, format); |
260 | OUT_RING(chan, 1); | 230 | OUT_RING(chan, 1); |
@@ -262,8 +232,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
262 | OUT_RING(chan, info->fix.line_length); | 232 | OUT_RING(chan, info->fix.line_length); |
263 | OUT_RING(chan, info->var.xres_virtual); | 233 | OUT_RING(chan, info->var.xres_virtual); |
264 | OUT_RING(chan, info->var.yres_virtual); | 234 | OUT_RING(chan, info->var.yres_virtual); |
265 | OUT_RING(chan, upper_32_bits(fb)); | 235 | OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); |
266 | OUT_RING(chan, lower_32_bits(fb)); | 236 | OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); |
267 | 237 | ||
268 | return 0; | 238 | return 0; |
269 | } | 239 | } |
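
With the hunks above, the accel hooks return an error instead of checking FBINFO state and falling back to cfb_*() themselves, so that policy presumably moves to a shared caller in nouveau_fbcon.c. A stand-alone sketch of the decision the deleted checks used to make; the types and constants are simplified stand-ins for the real fb_info fields:

    #include <stdio.h>

    #define FBINFO_STATE_RUNNING    0
    #define FBINFO_HWACCEL_DISABLED 0x0002  /* illustrative values */

    struct fb_info_sketch {
        int state;
        unsigned int flags;
    };

    /* hardware path: 0 on success, negative errno-style on failure */
    static int hw_fillrect(struct fb_info_sketch *info)
    {
        (void)info;
        return -16;     /* pretend the ring had no space (-EBUSY) */
    }

    static void sw_fillrect(struct fb_info_sketch *info)
    {
        (void)info;
        puts("software fallback");
    }

    /* The checks each accel hook used to perform, now done once here. */
    static void fillrect(struct fb_info_sketch *info)
    {
        if (info->state != FBINFO_STATE_RUNNING)
            return;

        if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
            hw_fillrect(info) == 0)
            return;

        sw_fillrect(info);
    }

    int main(void)
    {
        struct fb_info_sketch info = { FBINFO_STATE_RUNNING, 0 };

        fillrect(&info);    /* hw path fails, falls back to software */
        return 0;
    }
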
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 1da65bd60c10..8dd04c5dac67 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_ramht.h" | 30 | #include "nouveau_ramht.h" |
31 | #include "nouveau_vm.h" | ||
31 | 32 | ||
32 | static void | 33 | static void |
33 | nv50_fifo_playlist_update(struct drm_device *dev) | 34 | nv50_fifo_playlist_update(struct drm_device *dev) |
@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev) | |||
44 | 45 | ||
45 | /* We never schedule channel 0 or 127 */ | 46 | /* We never schedule channel 0 or 127 */ |
46 | for (i = 1, nr = 0; i < 127; i++) { | 47 | for (i = 1, nr = 0; i < 127; i++) { |
47 | if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { | 48 | if (dev_priv->channels.ptr[i] && |
49 | dev_priv->channels.ptr[i]->ramfc) { | ||
48 | nv_wo32(cur, (nr * 4), i); | 50 | nv_wo32(cur, (nr * 4), i); |
49 | nr++; | 51 | nr++; |
50 | } | 52 | } |
@@ -60,7 +62,7 @@ static void | |||
60 | nv50_fifo_channel_enable(struct drm_device *dev, int channel) | 62 | nv50_fifo_channel_enable(struct drm_device *dev, int channel) |
61 | { | 63 | { |
62 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 64 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
63 | struct nouveau_channel *chan = dev_priv->fifos[channel]; | 65 | struct nouveau_channel *chan = dev_priv->channels.ptr[channel]; |
64 | uint32_t inst; | 66 | uint32_t inst; |
65 | 67 | ||
66 | NV_DEBUG(dev, "ch%d\n", channel); | 68 | NV_DEBUG(dev, "ch%d\n", channel); |
@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev) | |||
105 | { | 107 | { |
106 | NV_DEBUG(dev, "\n"); | 108 | NV_DEBUG(dev, "\n"); |
107 | 109 | ||
110 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
108 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); | 111 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); |
109 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); | 112 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); |
110 | } | 113 | } |
@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev) | |||
118 | NV_DEBUG(dev, "\n"); | 121 | NV_DEBUG(dev, "\n"); |
119 | 122 | ||
120 | for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { | 123 | for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { |
121 | if (dev_priv->fifos[i]) | 124 | if (dev_priv->channels.ptr[i]) |
122 | nv50_fifo_channel_enable(dev, i); | 125 | nv50_fifo_channel_enable(dev, i); |
123 | else | 126 | else |
124 | nv50_fifo_channel_disable(dev, i); | 127 | nv50_fifo_channel_disable(dev, i); |
@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev) | |||
206 | if (!pfifo->playlist[0]) | 209 | if (!pfifo->playlist[0]) |
207 | return; | 210 | return; |
208 | 211 | ||
212 | nv_wr32(dev, 0x2140, 0x00000000); | ||
213 | nouveau_irq_unregister(dev, 8); | ||
214 | |||
209 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); | 215 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); |
210 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); | 216 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); |
211 | } | 217 | } |
@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan) | |||
256 | } | 262 | } |
257 | ramfc = chan->ramfc; | 263 | ramfc = chan->ramfc; |
258 | 264 | ||
265 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
266 | NV50_USER(chan->id), PAGE_SIZE); | ||
267 | if (!chan->user) | ||
268 | return -ENOMEM; | ||
269 | |||
259 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 270 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
260 | 271 | ||
261 | nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); | 272 | nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); |
@@ -291,10 +302,23 @@ void | |||
291 | nv50_fifo_destroy_context(struct nouveau_channel *chan) | 302 | nv50_fifo_destroy_context(struct nouveau_channel *chan) |
292 | { | 303 | { |
293 | struct drm_device *dev = chan->dev; | 304 | struct drm_device *dev = chan->dev; |
305 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
306 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
294 | struct nouveau_gpuobj *ramfc = NULL; | 307 | struct nouveau_gpuobj *ramfc = NULL; |
308 | unsigned long flags; | ||
295 | 309 | ||
296 | NV_DEBUG(dev, "ch%d\n", chan->id); | 310 | NV_DEBUG(dev, "ch%d\n", chan->id); |
297 | 311 | ||
312 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
313 | pfifo->reassign(dev, false); | ||
314 | |||
315 | /* Unload the context if it's the currently active one */ | ||
316 | if (pfifo->channel_id(dev) == chan->id) { | ||
317 | pfifo->disable(dev); | ||
318 | pfifo->unload_context(dev); | ||
319 | pfifo->enable(dev); | ||
320 | } | ||
321 | |||
298 | /* This will ensure the channel is seen as disabled. */ | 322 | /* This will ensure the channel is seen as disabled. */ |
299 | nouveau_gpuobj_ref(chan->ramfc, &ramfc); | 323 | nouveau_gpuobj_ref(chan->ramfc, &ramfc); |
300 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | 324 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) | |||
305 | nv50_fifo_channel_disable(dev, 127); | 329 | nv50_fifo_channel_disable(dev, 127); |
306 | nv50_fifo_playlist_update(dev); | 330 | nv50_fifo_playlist_update(dev); |
307 | 331 | ||
332 | pfifo->reassign(dev, true); | ||
333 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
334 | |||
335 | /* Free the channel resources */ | ||
336 | if (chan->user) { | ||
337 | iounmap(chan->user); | ||
338 | chan->user = NULL; | ||
339 | } | ||
308 | nouveau_gpuobj_ref(NULL, &ramfc); | 340 | nouveau_gpuobj_ref(NULL, &ramfc); |
309 | nouveau_gpuobj_ref(NULL, &chan->cache); | 341 | nouveau_gpuobj_ref(NULL, &chan->cache); |
310 | } | 342 | } |
@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev) | |||
392 | if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) | 424 | if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) |
393 | return 0; | 425 | return 0; |
394 | 426 | ||
395 | chan = dev_priv->fifos[chid]; | 427 | chan = dev_priv->channels.ptr[chid]; |
396 | if (!chan) { | 428 | if (!chan) { |
397 | NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); | 429 | NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); |
398 | return -EINVAL; | 430 | return -EINVAL; |
@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev) | |||
467 | void | 499 | void |
468 | nv50_fifo_tlb_flush(struct drm_device *dev) | 500 | nv50_fifo_tlb_flush(struct drm_device *dev) |
469 | { | 501 | { |
470 | nv50_vm_flush(dev, 5); | 502 | nv50_vm_flush_engine(dev, 5); |
471 | } | 503 | } |
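
The nv50_fifo_destroy_context() rework above orders teardown so the hardware is never left pointing at freed state: with PFIFO reassignment disabled under context_switch_lock, the context is unloaded if it is the active one, and only afterwards are the channel's RAMFC reference and the ioremap'd USER window released. A toy restatement of that ordering under stated stand-ins (a pthread mutex for the spinlock, plain ints for the hardware hooks); none of these names are driver API:

    #include <pthread.h>
    #include <stddef.h>

    struct channel {
        int id;
        void *user; /* stands in for the ioremap'd NV50_USER window */
    };

    static pthread_mutex_t ctxsw_lock = PTHREAD_MUTEX_INITIALIZER;
    static int current_chid = -1; /* stands in for pfifo->channel_id(dev) */

    static void hw_unload_context(void)
    {
        current_chid = -1; /* hardware no longer references any channel */
    }

    static void destroy_context(struct channel *chan)
    {
        pthread_mutex_lock(&ctxsw_lock);
        /* Only unload if this channel is the currently active one. */
        if (current_chid == chan->id)
            hw_unload_context();
        pthread_mutex_unlock(&ctxsw_lock);

        /* Free channel resources only after the unload, mirroring the
         * iounmap(chan->user) that now follows the locked section. */
        chan->user = NULL;
    }

    int main(void)
    {
        struct channel ch = { .id = 3, .user = (void *)0x1 };
        current_chid = ch.id;
        destroy_context(&ch);
        return 0;
    }
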
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index b2fab2bf3d61..6b149c0cc06d 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c | |||
@@ -26,6 +26,28 @@ | |||
26 | #include "nouveau_drv.h" | 26 | #include "nouveau_drv.h" |
27 | #include "nouveau_hw.h" | 27 | #include "nouveau_hw.h" |
28 | 28 | ||
29 | #include "nv50_display.h" | ||
30 | |||
31 | static void nv50_gpio_isr(struct drm_device *dev); | ||
32 | static void nv50_gpio_isr_bh(struct work_struct *work); | ||
33 | |||
34 | struct nv50_gpio_priv { | ||
35 | struct list_head handlers; | ||
36 | spinlock_t lock; | ||
37 | }; | ||
38 | |||
39 | struct nv50_gpio_handler { | ||
40 | struct drm_device *dev; | ||
41 | struct list_head head; | ||
42 | struct work_struct work; | ||
43 | bool inhibit; | ||
44 | |||
45 | struct dcb_gpio_entry *gpio; | ||
46 | |||
47 | void (*handler)(void *data, int state); | ||
48 | void *data; | ||
49 | }; | ||
50 | |||
29 | static int | 51 | static int |
30 | nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) | 52 | nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) |
31 | { | 53 | { |
@@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) | |||
75 | return 0; | 97 | return 0; |
76 | } | 98 | } |
77 | 99 | ||
100 | int | ||
101 | nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, | ||
102 | void (*handler)(void *, int), void *data) | ||
103 | { | ||
104 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
105 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
106 | struct nv50_gpio_priv *priv = pgpio->priv; | ||
107 | struct nv50_gpio_handler *gpioh; | ||
108 | struct dcb_gpio_entry *gpio; | ||
109 | unsigned long flags; | ||
110 | |||
111 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
112 | if (!gpio) | ||
113 | return -ENOENT; | ||
114 | |||
115 | gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL); | ||
116 | if (!gpioh) | ||
117 | return -ENOMEM; | ||
118 | |||
119 | INIT_WORK(&gpioh->work, nv50_gpio_isr_bh); | ||
120 | gpioh->dev = dev; | ||
121 | gpioh->gpio = gpio; | ||
122 | gpioh->handler = handler; | ||
123 | gpioh->data = data; | ||
124 | |||
125 | spin_lock_irqsave(&priv->lock, flags); | ||
126 | list_add(&gpioh->head, &priv->handlers); | ||
127 | spin_unlock_irqrestore(&priv->lock, flags); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
78 | void | 131 | void |
79 | nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) | 132 | nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag, |
133 | void (*handler)(void *, int), void *data) | ||
80 | { | 134 | { |
135 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
136 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
137 | struct nv50_gpio_priv *priv = pgpio->priv; | ||
138 | struct nv50_gpio_handler *gpioh, *tmp; | ||
81 | struct dcb_gpio_entry *gpio; | 139 | struct dcb_gpio_entry *gpio; |
82 | u32 reg, mask; | 140 | unsigned long flags; |
83 | 141 | ||
84 | gpio = nouveau_bios_gpio_entry(dev, tag); | 142 | gpio = nouveau_bios_gpio_entry(dev, tag); |
85 | if (!gpio) { | 143 | if (!gpio) |
86 | NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag); | ||
87 | return; | 144 | return; |
145 | |||
146 | spin_lock_irqsave(&priv->lock, flags); | ||
147 | list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) { | ||
148 | if (gpioh->gpio != gpio || | ||
149 | gpioh->handler != handler || | ||
150 | gpioh->data != data) | ||
151 | continue; | ||
152 | list_del(&gpioh->head); | ||
153 | kfree(gpioh); | ||
88 | } | 154 | } |
155 | spin_unlock_irqrestore(&priv->lock, flags); | ||
156 | } | ||
157 | |||
158 | bool | ||
159 | nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) | ||
160 | { | ||
161 | struct dcb_gpio_entry *gpio; | ||
162 | u32 reg, mask; | ||
163 | |||
164 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
165 | if (!gpio) | ||
166 | return false; | ||
89 | 167 | ||
90 | reg = gpio->line < 16 ? 0xe050 : 0xe070; | 168 | reg = gpio->line < 16 ? 0xe050 : 0xe070; |
91 | mask = 0x00010001 << (gpio->line & 0xf); | 169 | mask = 0x00010001 << (gpio->line & 0xf); |
92 | 170 | ||
93 | nv_wr32(dev, reg + 4, mask); | 171 | nv_wr32(dev, reg + 4, mask); |
94 | nv_mask(dev, reg + 0, mask, on ? mask : 0); | 172 | reg = nv_mask(dev, reg + 0, mask, on ? mask : 0); |
173 | return (reg & mask) == mask; | ||
174 | } | ||
175 | |||
176 | static int | ||
177 | nv50_gpio_create(struct drm_device *dev) | ||
178 | { | ||
179 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
180 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
181 | struct nv50_gpio_priv *priv; | ||
182 | |||
183 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
184 | if (!priv) | ||
185 | return -ENOMEM; | ||
186 | |||
187 | INIT_LIST_HEAD(&priv->handlers); | ||
188 | spin_lock_init(&priv->lock); | ||
189 | pgpio->priv = priv; | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static void | ||
194 | nv50_gpio_destroy(struct drm_device *dev) | ||
195 | { | ||
196 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
197 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
198 | |||
199 | kfree(pgpio->priv); | ||
200 | pgpio->priv = NULL; | ||
95 | } | 201 | } |
96 | 202 | ||
97 | int | 203 | int |
98 | nv50_gpio_init(struct drm_device *dev) | 204 | nv50_gpio_init(struct drm_device *dev) |
99 | { | 205 | { |
100 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 206 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
207 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
208 | struct nv50_gpio_priv *priv; | ||
209 | int ret; | ||
210 | |||
211 | if (!pgpio->priv) { | ||
212 | ret = nv50_gpio_create(dev); | ||
213 | if (ret) | ||
214 | return ret; | ||
215 | } | ||
216 | priv = pgpio->priv; | ||
101 | 217 | ||
102 | /* disable, and ack any pending gpio interrupts */ | 218 | /* disable, and ack any pending gpio interrupts */ |
103 | nv_wr32(dev, 0xe050, 0x00000000); | 219 | nv_wr32(dev, 0xe050, 0x00000000); |
@@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev) | |||
107 | nv_wr32(dev, 0xe074, 0xffffffff); | 223 | nv_wr32(dev, 0xe074, 0xffffffff); |
108 | } | 224 | } |
109 | 225 | ||
226 | nouveau_irq_register(dev, 21, nv50_gpio_isr); | ||
110 | return 0; | 227 | return 0; |
111 | } | 228 | } |
229 | |||
230 | void | ||
231 | nv50_gpio_fini(struct drm_device *dev) | ||
232 | { | ||
233 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
234 | |||
235 | nv_wr32(dev, 0xe050, 0x00000000); | ||
236 | if (dev_priv->chipset >= 0x90) | ||
237 | nv_wr32(dev, 0xe070, 0x00000000); | ||
238 | nouveau_irq_unregister(dev, 21); | ||
239 | |||
240 | nv50_gpio_destroy(dev); | ||
241 | } | ||
242 | |||
243 | static void | ||
244 | nv50_gpio_isr_bh(struct work_struct *work) | ||
245 | { | ||
246 | struct nv50_gpio_handler *gpioh = | ||
247 | container_of(work, struct nv50_gpio_handler, work); | ||
248 | struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private; | ||
249 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
250 | struct nv50_gpio_priv *priv = pgpio->priv; | ||
251 | unsigned long flags; | ||
252 | int state; | ||
253 | |||
254 | state = pgpio->get(gpioh->dev, gpioh->gpio->tag); | ||
255 | if (state < 0) | ||
256 | return; | ||
257 | |||
258 | gpioh->handler(gpioh->data, state); | ||
259 | |||
260 | spin_lock_irqsave(&priv->lock, flags); | ||
261 | gpioh->inhibit = false; | ||
262 | spin_unlock_irqrestore(&priv->lock, flags); | ||
263 | } | ||
264 | |||
265 | static void | ||
266 | nv50_gpio_isr(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
269 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
270 | struct nv50_gpio_priv *priv = pgpio->priv; | ||
271 | struct nv50_gpio_handler *gpioh; | ||
272 | u32 intr0, intr1 = 0; | ||
273 | u32 hi, lo, ch; | ||
274 | |||
275 | intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); | ||
276 | if (dev_priv->chipset >= 0x90) | ||
277 | intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); | ||
278 | |||
279 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); | ||
280 | lo = (intr0 >> 16) | (intr1 & 0xffff0000); | ||
281 | ch = hi | lo; | ||
282 | |||
283 | nv_wr32(dev, 0xe054, intr0); | ||
284 | if (dev_priv->chipset >= 0x90) | ||
285 | nv_wr32(dev, 0xe074, intr1); | ||
286 | |||
287 | spin_lock(&priv->lock); | ||
288 | list_for_each_entry(gpioh, &priv->handlers, head) { | ||
289 | if (!(ch & (1 << gpioh->gpio->line))) | ||
290 | continue; | ||
291 | |||
292 | if (gpioh->inhibit) | ||
293 | continue; | ||
294 | gpioh->inhibit = true; | ||
295 | |||
296 | queue_work(dev_priv->wq, &gpioh->work); | ||
297 | } | ||
298 | spin_unlock(&priv->lock); | ||
299 | } | ||
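
In nv50_gpio_isr() above, each status register packs two per-line bit fields in its 16-bit halves; 0xe054 covers GPIO lines 0-15 and, on chipset >= 0x90, 0xe074 covers lines 16-31, presumably as high/low transition bits. The hi/lo/ch arithmetic folds both words into a single change mask with one bit per line. A standalone check of that folding, with hypothetical register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t intr0 = 0x00040002; /* lines 0-15: bit 1 in low half, bit 2 in high half */
        uint32_t intr1 = 0x00000001; /* lines 16-31: bit 16 in low half */

        uint32_t hi = (intr0 & 0x0000ffff) | (intr1 << 16);
        uint32_t lo = (intr0 >> 16) | (intr1 & 0xffff0000);
        uint32_t ch = hi | lo; /* one bit per GPIO line that fired */

        printf("ch=0x%08x\n", (unsigned)ch); /* prints ch=0x00010006: lines 1, 2, 16 */
        return 0;
    }
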
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8b669d0af610..c510e74acf4d 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -29,6 +29,12 @@ | |||
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_ramht.h" | 30 | #include "nouveau_ramht.h" |
31 | #include "nouveau_grctx.h" | 31 | #include "nouveau_grctx.h" |
32 | #include "nouveau_dma.h" | ||
33 | #include "nouveau_vm.h" | ||
34 | #include "nv50_evo.h" | ||
35 | |||
36 | static int nv50_graph_register(struct drm_device *); | ||
37 | static void nv50_graph_isr(struct drm_device *); | ||
32 | 38 | ||
33 | static void | 39 | static void |
34 | nv50_graph_init_reset(struct drm_device *dev) | 40 | nv50_graph_init_reset(struct drm_device *dev) |
@@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev) | |||
46 | { | 52 | { |
47 | NV_DEBUG(dev, "\n"); | 53 | NV_DEBUG(dev, "\n"); |
48 | 54 | ||
55 | nouveau_irq_register(dev, 12, nv50_graph_isr); | ||
49 | nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); | 56 | nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); |
50 | nv_wr32(dev, 0x400138, 0xffffffff); | 57 | nv_wr32(dev, 0x400138, 0xffffffff); |
51 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); | 58 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); |
@@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev) | |||
145 | nv50_graph_init_reset(dev); | 152 | nv50_graph_init_reset(dev); |
146 | nv50_graph_init_regs__nv(dev); | 153 | nv50_graph_init_regs__nv(dev); |
147 | nv50_graph_init_regs(dev); | 154 | nv50_graph_init_regs(dev); |
148 | nv50_graph_init_intr(dev); | ||
149 | 155 | ||
150 | ret = nv50_graph_init_ctxctl(dev); | 156 | ret = nv50_graph_init_ctxctl(dev); |
151 | if (ret) | 157 | if (ret) |
152 | return ret; | 158 | return ret; |
153 | 159 | ||
160 | ret = nv50_graph_register(dev); | ||
161 | if (ret) | ||
162 | return ret; | ||
163 | nv50_graph_init_intr(dev); | ||
154 | return 0; | 164 | return 0; |
155 | } | 165 | } |
156 | 166 | ||
@@ -158,6 +168,8 @@ void | |||
158 | nv50_graph_takedown(struct drm_device *dev) | 168 | nv50_graph_takedown(struct drm_device *dev) |
159 | { | 169 | { |
160 | NV_DEBUG(dev, "\n"); | 170 | NV_DEBUG(dev, "\n"); |
171 | nv_wr32(dev, 0x40013c, 0x00000000); | ||
172 | nouveau_irq_unregister(dev, 12); | ||
161 | } | 173 | } |
162 | 174 | ||
163 | void | 175 | void |
@@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev) | |||
190 | inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; | 202 | inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; |
191 | 203 | ||
192 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 204 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
193 | struct nouveau_channel *chan = dev_priv->fifos[i]; | 205 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; |
194 | 206 | ||
195 | if (chan && chan->ramin && chan->ramin->vinst == inst) | 207 | if (chan && chan->ramin && chan->ramin->vinst == inst) |
196 | return chan; | 208 | return chan; |
@@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) | |||
211 | 223 | ||
212 | NV_DEBUG(dev, "ch%d\n", chan->id); | 224 | NV_DEBUG(dev, "ch%d\n", chan->id); |
213 | 225 | ||
214 | ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, | 226 | ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, |
215 | NVOBJ_FLAG_ZERO_ALLOC | | 227 | NVOBJ_FLAG_ZERO_ALLOC | |
216 | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); | 228 | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); |
217 | if (ret) | 229 | if (ret) |
@@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) | |||
234 | nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); | 246 | nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); |
235 | 247 | ||
236 | dev_priv->engine.instmem.flush(dev); | 248 | dev_priv->engine.instmem.flush(dev); |
249 | atomic_inc(&chan->vm->pgraph_refs); | ||
237 | return 0; | 250 | return 0; |
238 | } | 251 | } |
239 | 252 | ||
@@ -242,18 +255,31 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) | |||
242 | { | 255 | { |
243 | struct drm_device *dev = chan->dev; | 256 | struct drm_device *dev = chan->dev; |
244 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 257 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
258 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
245 | int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; | 259 | int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; |
260 | unsigned long flags; | ||
246 | 261 | ||
247 | NV_DEBUG(dev, "ch%d\n", chan->id); | 262 | NV_DEBUG(dev, "ch%d\n", chan->id); |
248 | 263 | ||
249 | if (!chan->ramin) | 264 | if (!chan->ramin) |
250 | return; | 265 | return; |
251 | 266 | ||
267 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
268 | pgraph->fifo_access(dev, false); | ||
269 | |||
270 | if (pgraph->channel(dev) == chan) | ||
271 | pgraph->unload_context(dev); | ||
272 | |||
252 | for (i = hdr; i < hdr + 24; i += 4) | 273 | for (i = hdr; i < hdr + 24; i += 4) |
253 | nv_wo32(chan->ramin, i, 0); | 274 | nv_wo32(chan->ramin, i, 0); |
254 | dev_priv->engine.instmem.flush(dev); | 275 | dev_priv->engine.instmem.flush(dev); |
255 | 276 | ||
277 | pgraph->fifo_access(dev, true); | ||
278 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
279 | |||
256 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); | 280 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); |
281 | |||
282 | atomic_dec(&chan->vm->pgraph_refs); | ||
257 | } | 283 | } |
258 | 284 | ||
259 | static int | 285 | static int |
@@ -306,7 +332,7 @@ nv50_graph_unload_context(struct drm_device *dev) | |||
306 | return 0; | 332 | return 0; |
307 | } | 333 | } |
308 | 334 | ||
309 | void | 335 | static void |
310 | nv50_graph_context_switch(struct drm_device *dev) | 336 | nv50_graph_context_switch(struct drm_device *dev) |
311 | { | 337 | { |
312 | uint32_t inst; | 338 | uint32_t inst; |
@@ -322,8 +348,8 @@ nv50_graph_context_switch(struct drm_device *dev) | |||
322 | } | 348 | } |
323 | 349 | ||
324 | static int | 350 | static int |
325 | nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, | 351 | nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, |
326 | int mthd, uint32_t data) | 352 | u32 class, u32 mthd, u32 data) |
327 | { | 353 | { |
328 | struct nouveau_gpuobj *gpuobj; | 354 | struct nouveau_gpuobj *gpuobj; |
329 | 355 | ||
@@ -340,8 +366,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, | |||
340 | } | 366 | } |
341 | 367 | ||
342 | static int | 368 | static int |
343 | nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, | 369 | nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, |
344 | int mthd, uint32_t data) | 370 | u32 class, u32 mthd, u32 data) |
345 | { | 371 | { |
346 | if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) | 372 | if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) |
347 | return -ERANGE; | 373 | return -ERANGE; |
@@ -351,16 +377,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, | |||
351 | } | 377 | } |
352 | 378 | ||
353 | static int | 379 | static int |
354 | nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, | 380 | nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, |
355 | int mthd, uint32_t data) | 381 | u32 class, u32 mthd, u32 data) |
356 | { | 382 | { |
357 | chan->nvsw.vblsem_rval = data; | 383 | chan->nvsw.vblsem_rval = data; |
358 | return 0; | 384 | return 0; |
359 | } | 385 | } |
360 | 386 | ||
361 | static int | 387 | static int |
362 | nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, | 388 | nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, |
363 | int mthd, uint32_t data) | 389 | u32 class, u32 mthd, u32 data) |
364 | { | 390 | { |
365 | struct drm_device *dev = chan->dev; | 391 | struct drm_device *dev = chan->dev; |
366 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 392 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -368,45 +394,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, | |||
368 | if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) | 394 | if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) |
369 | return -EINVAL; | 395 | return -EINVAL; |
370 | 396 | ||
371 | if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & | 397 | drm_vblank_get(dev, data); |
372 | NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) { | ||
373 | nv_wr32(dev, NV50_PDISPLAY_INTR_1, | ||
374 | NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); | ||
375 | nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, | ||
376 | NV50_PDISPLAY_INTR_EN) | | ||
377 | NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data)); | ||
378 | } | ||
379 | 398 | ||
399 | chan->nvsw.vblsem_head = data; | ||
380 | list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); | 400 | list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); |
401 | |||
381 | return 0; | 402 | return 0; |
382 | } | 403 | } |
383 | 404 | ||
384 | static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { | 405 | static int |
385 | { 0x018c, nv50_graph_nvsw_dma_vblsem }, | 406 | nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan, |
386 | { 0x0400, nv50_graph_nvsw_vblsem_offset }, | 407 | u32 class, u32 mthd, u32 data) |
387 | { 0x0404, nv50_graph_nvsw_vblsem_release_val }, | 408 | { |
388 | { 0x0408, nv50_graph_nvsw_vblsem_release }, | 409 | struct nouveau_page_flip_state s; |
389 | {} | ||
390 | }; | ||
391 | 410 | ||
392 | struct nouveau_pgraph_object_class nv50_graph_grclass[] = { | 411 | if (!nouveau_finish_page_flip(chan, &s)) { |
393 | { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ | 412 | /* XXX - Do something here */ |
394 | { 0x0030, false, NULL }, /* null */ | 413 | } |
395 | { 0x5039, false, NULL }, /* m2mf */ | 414 | |
396 | { 0x502d, false, NULL }, /* 2d */ | 415 | return 0; |
397 | { 0x50c0, false, NULL }, /* compute */ | 416 | } |
398 | { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ | 417 | |
399 | { 0x5097, false, NULL }, /* tesla (nv50) */ | 418 | static int |
400 | { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ | 419 | nv50_graph_register(struct drm_device *dev) |
401 | { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ | 420 | { |
402 | { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ | 421 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
403 | {} | 422 | |
404 | }; | 423 | if (dev_priv->engine.graph.registered) |
424 | return 0; | ||
425 | |||
426 | NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ | ||
427 | NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem); | ||
428 | NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); | ||
429 | NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); | ||
430 | NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); | ||
431 | NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip); | ||
432 | |||
433 | NVOBJ_CLASS(dev, 0x0030, GR); /* null */ | ||
434 | NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ | ||
435 | NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ | ||
436 | |||
437 | /* tesla */ | ||
438 | if (dev_priv->chipset == 0x50) | ||
439 | NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ | ||
440 | else | ||
441 | if (dev_priv->chipset < 0xa0) | ||
442 | NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ | ||
443 | else { | ||
444 | switch (dev_priv->chipset) { | ||
445 | case 0xa0: | ||
446 | case 0xaa: | ||
447 | case 0xac: | ||
448 | NVOBJ_CLASS(dev, 0x8397, GR); | ||
449 | break; | ||
450 | case 0xa3: | ||
451 | case 0xa5: | ||
452 | case 0xa8: | ||
453 | NVOBJ_CLASS(dev, 0x8597, GR); | ||
454 | break; | ||
455 | case 0xaf: | ||
456 | NVOBJ_CLASS(dev, 0x8697, GR); | ||
457 | break; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | /* compute */ | ||
462 | NVOBJ_CLASS(dev, 0x50c0, GR); | ||
463 | if (dev_priv->chipset > 0xa0 && | ||
464 | dev_priv->chipset != 0xaa && | ||
465 | dev_priv->chipset != 0xac) | ||
466 | NVOBJ_CLASS(dev, 0x85c0, GR); | ||
467 | |||
468 | dev_priv->engine.graph.registered = true; | ||
469 | return 0; | ||
470 | } | ||
405 | 471 | ||
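
nv50_graph_register() above replaces the static nv50_graph_grclass[] table with runtime NVOBJ_CLASS()/NVOBJ_MTHD() registration, selecting the tesla 3D class by chipset. A standalone restatement of that selection; the function name is hypothetical, while the class numbers and chipset checks are taken from the diff:

    #include <stdio.h>

    /* Map an NV50-family chipset id to its tesla (3D) object class,
     * following the chipset checks in nv50_graph_register() above. */
    static unsigned tesla_class(unsigned chipset)
    {
        if (chipset == 0x50)
            return 0x5097;
        if (chipset < 0xa0)
            return 0x8297;
        switch (chipset) {
        case 0xa0: case 0xaa: case 0xac: return 0x8397;
        case 0xa3: case 0xa5: case 0xa8: return 0x8597;
        case 0xaf:                       return 0x8697;
        }
        return 0; /* unknown chipset: nothing registered */
    }

    int main(void)
    {
        printf("nv86 tesla class: 0x%04x\n", tesla_class(0x86)); /* 0x8297 */
        return 0;
    }
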
406 | void | 472 | void |
407 | nv50_graph_tlb_flush(struct drm_device *dev) | 473 | nv50_graph_tlb_flush(struct drm_device *dev) |
408 | { | 474 | { |
409 | nv50_vm_flush(dev, 0); | 475 | nv50_vm_flush_engine(dev, 0); |
410 | } | 476 | } |
411 | 477 | ||
412 | void | 478 | void |
@@ -449,8 +515,500 @@ nv86_graph_tlb_flush(struct drm_device *dev) | |||
449 | nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); | 515 | nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); |
450 | } | 516 | } |
451 | 517 | ||
452 | nv50_vm_flush(dev, 0); | 518 | nv50_vm_flush_engine(dev, 0); |
453 | 519 | ||
454 | nv_mask(dev, 0x400500, 0x00000001, 0x00000001); | 520 | nv_mask(dev, 0x400500, 0x00000001, 0x00000001); |
455 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 521 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
456 | } | 522 | } |
523 | |||
524 | static struct nouveau_enum nv50_mp_exec_error_names[] = | ||
525 | { | ||
526 | { 3, "STACK_UNDERFLOW" }, | ||
527 | { 4, "QUADON_ACTIVE" }, | ||
528 | { 8, "TIMEOUT" }, | ||
529 | { 0x10, "INVALID_OPCODE" }, | ||
530 | { 0x40, "BREAKPOINT" }, | ||
531 | {} | ||
532 | }; | ||
533 | |||
534 | static struct nouveau_bitfield nv50_graph_trap_m2mf[] = { | ||
535 | { 0x00000001, "NOTIFY" }, | ||
536 | { 0x00000002, "IN" }, | ||
537 | { 0x00000004, "OUT" }, | ||
538 | {} | ||
539 | }; | ||
540 | |||
541 | static struct nouveau_bitfield nv50_graph_trap_vfetch[] = { | ||
542 | { 0x00000001, "FAULT" }, | ||
543 | {} | ||
544 | }; | ||
545 | |||
546 | static struct nouveau_bitfield nv50_graph_trap_strmout[] = { | ||
547 | { 0x00000001, "FAULT" }, | ||
548 | {} | ||
549 | }; | ||
550 | |||
551 | static struct nouveau_bitfield nv50_graph_trap_ccache[] = { | ||
552 | { 0x00000001, "FAULT" }, | ||
553 | {} | ||
554 | }; | ||
555 | |||
556 | /* There must be a *lot* of these. Will take some time to gather them up. */ | ||
557 | static struct nouveau_enum nv50_data_error_names[] = { | ||
558 | { 4, "INVALID_VALUE" }, | ||
559 | { 5, "INVALID_ENUM" }, | ||
560 | { 8, "INVALID_OBJECT" }, | ||
561 | { 0xc, "INVALID_BITFIELD" }, | ||
562 | { 0x28, "MP_NO_REG_SPACE" }, | ||
563 | { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, | ||
564 | {} | ||
565 | }; | ||
566 | |||
567 | static struct nouveau_bitfield nv50_graph_intr[] = { | ||
568 | { 0x00000001, "NOTIFY" }, | ||
569 | { 0x00000002, "COMPUTE_QUERY" }, | ||
570 | { 0x00000010, "ILLEGAL_MTHD" }, | ||
571 | { 0x00000020, "ILLEGAL_CLASS" }, | ||
572 | { 0x00000040, "DOUBLE_NOTIFY" }, | ||
573 | { 0x00001000, "CONTEXT_SWITCH" }, | ||
574 | { 0x00010000, "BUFFER_NOTIFY" }, | ||
575 | { 0x00100000, "DATA_ERROR" }, | ||
576 | { 0x00200000, "TRAP" }, | ||
577 | { 0x01000000, "SINGLE_STEP" }, | ||
578 | {} | ||
579 | }; | ||
580 | |||
581 | static void | ||
582 | nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) | ||
583 | { | ||
584 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
585 | uint32_t units = nv_rd32(dev, 0x1540); | ||
586 | uint32_t addr, mp10, status, pc, oplow, ophigh; | ||
587 | int i; | ||
588 | int mps = 0; | ||
589 | for (i = 0; i < 4; i++) { | ||
590 | if (!(units & 1 << (i+24))) | ||
591 | continue; | ||
592 | if (dev_priv->chipset < 0xa0) | ||
593 | addr = 0x408200 + (tpid << 12) + (i << 7); | ||
594 | else | ||
595 | addr = 0x408100 + (tpid << 11) + (i << 7); | ||
596 | mp10 = nv_rd32(dev, addr + 0x10); | ||
597 | status = nv_rd32(dev, addr + 0x14); | ||
598 | if (!status) | ||
599 | continue; | ||
600 | if (display) { | ||
601 | nv_rd32(dev, addr + 0x20); | ||
602 | pc = nv_rd32(dev, addr + 0x24); | ||
603 | oplow = nv_rd32(dev, addr + 0x70); | ||
604 | ophigh = nv_rd32(dev, addr + 0x74); | ||
605 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " | ||
606 | "TP %d MP %d: ", tpid, i); | ||
607 | nouveau_enum_print(nv50_mp_exec_error_names, status); | ||
608 | printk(" at %06x warp %d, opcode %08x %08x\n", | ||
609 | pc&0xffffff, pc >> 24, | ||
610 | oplow, ophigh); | ||
611 | } | ||
612 | nv_wr32(dev, addr + 0x10, mp10); | ||
613 | nv_wr32(dev, addr + 0x14, 0); | ||
614 | mps++; | ||
615 | } | ||
616 | if (!mps && display) | ||
617 | NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " | ||
618 | "No MPs claiming errors?\n", tpid); | ||
619 | } | ||
620 | |||
621 | static void | ||
622 | nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, | ||
623 | uint32_t ustatus_new, int display, const char *name) | ||
624 | { | ||
625 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
626 | int tps = 0; | ||
627 | uint32_t units = nv_rd32(dev, 0x1540); | ||
628 | int i, r; | ||
629 | uint32_t ustatus_addr, ustatus; | ||
630 | for (i = 0; i < 16; i++) { | ||
631 | if (!(units & (1 << i))) | ||
632 | continue; | ||
633 | if (dev_priv->chipset < 0xa0) | ||
634 | ustatus_addr = ustatus_old + (i << 12); | ||
635 | else | ||
636 | ustatus_addr = ustatus_new + (i << 11); | ||
637 | ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; | ||
638 | if (!ustatus) | ||
639 | continue; | ||
640 | tps++; | ||
641 | switch (type) { | ||
642 | case 6: /* texture error... unknown for now */ | ||
643 | nv50_fb_vm_trap(dev, display, name); | ||
644 | if (display) { | ||
645 | NV_ERROR(dev, "magic set %d:\n", i); | ||
646 | for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) | ||
647 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | ||
648 | nv_rd32(dev, r)); | ||
649 | } | ||
650 | break; | ||
651 | case 7: /* MP error */ | ||
652 | if (ustatus & 0x00010000) { | ||
653 | nv50_pgraph_mp_trap(dev, i, display); | ||
654 | ustatus &= ~0x00010000; | ||
655 | } | ||
656 | break; | ||
657 | case 8: /* TPDMA error */ | ||
658 | { | ||
659 | uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); | ||
660 | uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); | ||
661 | uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); | ||
662 | uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); | ||
663 | uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); | ||
664 | uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); | ||
665 | uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); | ||
666 | nv50_fb_vm_trap(dev, display, name); | ||
667 | /* 2d engine destination */ | ||
668 | if (ustatus & 0x00000010) { | ||
669 | if (display) { | ||
670 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", | ||
671 | i, e14, e10); | ||
672 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
673 | i, e0c, e18, e1c, e20, e24); | ||
674 | } | ||
675 | ustatus &= ~0x00000010; | ||
676 | } | ||
677 | /* Render target */ | ||
678 | if (ustatus & 0x00000040) { | ||
679 | if (display) { | ||
680 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", | ||
681 | i, e14, e10); | ||
682 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
683 | i, e0c, e18, e1c, e20, e24); | ||
684 | } | ||
685 | ustatus &= ~0x00000040; | ||
686 | } | ||
687 | /* CUDA memory: l[], g[] or stack. */ | ||
688 | if (ustatus & 0x00000080) { | ||
689 | if (display) { | ||
690 | if (e18 & 0x80000000) { | ||
691 | /* g[] read fault? */ | ||
692 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", | ||
693 | i, e14, e10 | ((e18 >> 24) & 0x1f)); | ||
694 | e18 &= ~0x1f000000; | ||
695 | } else if (e18 & 0xc) { | ||
696 | /* g[] write fault? */ | ||
697 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", | ||
698 | i, e14, e10 | ((e18 >> 7) & 0x1f)); | ||
699 | e18 &= ~0x00000f80; | ||
700 | } else { | ||
701 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", | ||
702 | i, e14, e10); | ||
703 | } | ||
704 | NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
705 | i, e0c, e18, e1c, e20, e24); | ||
706 | } | ||
707 | ustatus &= ~0x00000080; | ||
708 | } | ||
709 | } | ||
710 | break; | ||
711 | } | ||
712 | if (ustatus) { | ||
713 | if (display) | ||
714 | NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); | ||
715 | } | ||
716 | nv_wr32(dev, ustatus_addr, 0xc0000000); | ||
717 | } | ||
718 | |||
719 | if (!tps && display) | ||
720 | NV_INFO(dev, "%s - No TPs claiming errors?\n", name); | ||
721 | } | ||
722 | |||
723 | static int | ||
724 | nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid) | ||
725 | { | ||
726 | u32 status = nv_rd32(dev, 0x400108); | ||
727 | u32 ustatus; | ||
728 | |||
729 | if (!status && display) { | ||
730 | NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n"); | ||
731 | return 1; | ||
732 | } | ||
733 | |||
734 | /* DISPATCH: Relays commands to other units and handles NOTIFY, | ||
735 | * COND, QUERY. If you get a trap from it, the command is still stuck | ||
736 | * in DISPATCH and you need to do something about it. */ | ||
737 | if (status & 0x001) { | ||
738 | ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; | ||
739 | if (!ustatus && display) { | ||
740 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); | ||
741 | } | ||
742 | |||
743 | nv_wr32(dev, 0x400500, 0x00000000); | ||
744 | |||
745 | /* Known to be triggered by screwed up NOTIFY and COND... */ | ||
746 | if (ustatus & 0x00000001) { | ||
747 | u32 addr = nv_rd32(dev, 0x400808); | ||
748 | u32 subc = (addr & 0x00070000) >> 16; | ||
749 | u32 mthd = (addr & 0x00001ffc); | ||
750 | u32 datal = nv_rd32(dev, 0x40080c); | ||
751 | u32 datah = nv_rd32(dev, 0x400810); | ||
752 | u32 class = nv_rd32(dev, 0x400814); | ||
753 | u32 r848 = nv_rd32(dev, 0x400848); | ||
754 | |||
755 | NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n"); | ||
756 | if (display && (addr & 0x80000000)) { | ||
757 | NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " | ||
758 | "subc %d class 0x%04x mthd 0x%04x " | ||
759 | "data 0x%08x%08x " | ||
760 | "400808 0x%08x 400848 0x%08x\n", | ||
761 | chid, inst, subc, class, mthd, datah, | ||
762 | datal, addr, r848); | ||
763 | } else | ||
764 | if (display) { | ||
765 | NV_INFO(dev, "PGRAPH - no stuck command?\n"); | ||
766 | } | ||
767 | |||
768 | nv_wr32(dev, 0x400808, 0); | ||
769 | nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); | ||
770 | nv_wr32(dev, 0x400848, 0); | ||
771 | ustatus &= ~0x00000001; | ||
772 | } | ||
773 | |||
774 | if (ustatus & 0x00000002) { | ||
775 | u32 addr = nv_rd32(dev, 0x40084c); | ||
776 | u32 subc = (addr & 0x00070000) >> 16; | ||
777 | u32 mthd = (addr & 0x00001ffc); | ||
778 | u32 data = nv_rd32(dev, 0x40085c); | ||
779 | u32 class = nv_rd32(dev, 0x400814); | ||
780 | |||
781 | NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n"); | ||
782 | if (display && (addr & 0x80000000)) { | ||
783 | NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " | ||
784 | "subc %d class 0x%04x mthd 0x%04x " | ||
785 | "data 0x%08x 40084c 0x%08x\n", | ||
786 | chid, inst, subc, class, mthd, | ||
787 | data, addr); | ||
788 | } else | ||
789 | if (display) { | ||
790 | NV_INFO(dev, "PGRAPH - no stuck command?\n"); | ||
791 | } | ||
792 | |||
793 | nv_wr32(dev, 0x40084c, 0); | ||
794 | ustatus &= ~0x00000002; | ||
795 | } | ||
796 | |||
797 | if (ustatus && display) { | ||
798 | NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown " | ||
799 | "0x%08x)\n", ustatus); | ||
800 | } | ||
801 | |||
802 | nv_wr32(dev, 0x400804, 0xc0000000); | ||
803 | nv_wr32(dev, 0x400108, 0x001); | ||
804 | status &= ~0x001; | ||
805 | if (!status) | ||
806 | return 0; | ||
807 | } | ||
808 | |||
809 | /* M2MF: Memory to memory copy engine. */ | ||
810 | if (status & 0x002) { | ||
811 | u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; | ||
812 | if (display) { | ||
813 | NV_INFO(dev, "PGRAPH - TRAP_M2MF"); | ||
814 | nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); | ||
815 | printk("\n"); | ||
816 | NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n", | ||
817 | nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808), | ||
818 | nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810)); | ||
819 | |||
820 | } | ||
821 | |||
822 | /* No sane way found yet -- just reset the bugger. */ | ||
823 | nv_wr32(dev, 0x400040, 2); | ||
824 | nv_wr32(dev, 0x400040, 0); | ||
825 | nv_wr32(dev, 0x406800, 0xc0000000); | ||
826 | nv_wr32(dev, 0x400108, 0x002); | ||
827 | status &= ~0x002; | ||
828 | } | ||
829 | |||
830 | /* VFETCH: Fetches data from vertex buffers. */ | ||
831 | if (status & 0x004) { | ||
832 | u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; | ||
833 | if (display) { | ||
834 | NV_INFO(dev, "PGRAPH - TRAP_VFETCH"); | ||
835 | nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); | ||
836 | printk("\n"); | ||
837 | NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n", | ||
838 | nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), | ||
839 | nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10)); | ||
840 | } | ||
841 | |||
842 | nv_wr32(dev, 0x400c04, 0xc0000000); | ||
843 | nv_wr32(dev, 0x400108, 0x004); | ||
844 | status &= ~0x004; | ||
845 | } | ||
846 | |||
847 | /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ | ||
848 | if (status & 0x008) { | ||
849 | ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; | ||
850 | if (display) { | ||
851 | NV_INFO(dev, "PGRAPH - TRAP_STRMOUT"); | ||
852 | nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); | ||
853 | printk("\n"); | ||
854 | NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n", | ||
855 | nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), | ||
856 | nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810)); | ||
857 | |||
858 | } | ||
859 | |||
860 | /* No sane way found yet -- just reset the bugger. */ | ||
861 | nv_wr32(dev, 0x400040, 0x80); | ||
862 | nv_wr32(dev, 0x400040, 0); | ||
863 | nv_wr32(dev, 0x401800, 0xc0000000); | ||
864 | nv_wr32(dev, 0x400108, 0x008); | ||
865 | status &= ~0x008; | ||
866 | } | ||
867 | |||
868 | /* CCACHE: Handles code and c[] caches and fills them. */ | ||
869 | if (status & 0x010) { | ||
870 | ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; | ||
871 | if (display) { | ||
872 | NV_INFO(dev, "PGRAPH - TRAP_CCACHE"); | ||
873 | nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); | ||
874 | printk("\n"); | ||
875 | NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x" | ||
876 | " %08x %08x %08x\n", | ||
877 | nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804), | ||
878 | nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c), | ||
879 | nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814), | ||
880 | nv_rd32(dev, 0x40581c)); | ||
881 | |||
882 | } | ||
883 | |||
884 | nv_wr32(dev, 0x405018, 0xc0000000); | ||
885 | nv_wr32(dev, 0x400108, 0x010); | ||
886 | status &= ~0x010; | ||
887 | } | ||
888 | |||
889 | /* Unknown, not seen yet... 0x402000 is the only trap status reg | ||
890 | * remaining, so try to handle it anyway. Perhaps related to that | ||
891 | * unknown DMA slot on tesla? */ | ||
892 | if (status & 0x20) { | ||
893 | ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; | ||
894 | if (display) | ||
895 | NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus); | ||
896 | nv_wr32(dev, 0x402000, 0xc0000000); | ||
897 | /* no status modification on purpose */ | ||
898 | } | ||
899 | |||
900 | /* TEXTURE: CUDA texturing units */ | ||
901 | if (status & 0x040) { | ||
902 | nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display, | ||
903 | "PGRAPH - TRAP_TEXTURE"); | ||
904 | nv_wr32(dev, 0x400108, 0x040); | ||
905 | status &= ~0x040; | ||
906 | } | ||
907 | |||
908 | /* MP: CUDA execution engines. */ | ||
909 | if (status & 0x080) { | ||
910 | nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display, | ||
911 | "PGRAPH - TRAP_MP"); | ||
912 | nv_wr32(dev, 0x400108, 0x080); | ||
913 | status &= ~0x080; | ||
914 | } | ||
915 | |||
916 | /* TPDMA: Handles TP-initiated uncached memory accesses: | ||
917 | * l[], g[], stack, 2d surfaces, render targets. */ | ||
918 | if (status & 0x100) { | ||
919 | nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display, | ||
920 | "PGRAPH - TRAP_TPDMA"); | ||
921 | nv_wr32(dev, 0x400108, 0x100); | ||
922 | status &= ~0x100; | ||
923 | } | ||
924 | |||
925 | if (status) { | ||
926 | if (display) | ||
927 | NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status); | ||
928 | nv_wr32(dev, 0x400108, status); | ||
929 | } | ||
930 | |||
931 | return 1; | ||
932 | } | ||
933 | |||
934 | static int | ||
935 | nv50_graph_isr_chid(struct drm_device *dev, u64 inst) | ||
936 | { | ||
937 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
938 | struct nouveau_channel *chan; | ||
939 | unsigned long flags; | ||
940 | int i; | ||
941 | |||
942 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
943 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
944 | chan = dev_priv->channels.ptr[i]; | ||
945 | if (!chan || !chan->ramin) | ||
946 | continue; | ||
947 | |||
948 | if (inst == chan->ramin->vinst) | ||
949 | break; | ||
950 | } | ||
951 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
952 | return i; | ||
953 | } | ||
954 | |||
955 | static void | ||
956 | nv50_graph_isr(struct drm_device *dev) | ||
957 | { | ||
958 | u32 stat; | ||
959 | |||
960 | while ((stat = nv_rd32(dev, 0x400100))) { | ||
961 | u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12; | ||
962 | u32 chid = nv50_graph_isr_chid(dev, inst); | ||
963 | u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); | ||
964 | u32 subc = (addr & 0x00070000) >> 16; | ||
965 | u32 mthd = (addr & 0x00001ffc); | ||
966 | u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); | ||
967 | u32 class = nv_rd32(dev, 0x400814); | ||
968 | u32 show = stat; | ||
969 | |||
970 | if (stat & 0x00000010) { | ||
971 | if (!nouveau_gpuobj_mthd_call2(dev, chid, class, | ||
972 | mthd, data)) | ||
973 | show &= ~0x00000010; | ||
974 | } | ||
975 | |||
976 | if (stat & 0x00001000) { | ||
977 | nv_wr32(dev, 0x400500, 0x00000000); | ||
978 | nv_wr32(dev, 0x400100, 0x00001000); | ||
979 | nv_mask(dev, 0x40013c, 0x00001000, 0x00000000); | ||
980 | nv50_graph_context_switch(dev); | ||
981 | stat &= ~0x00001000; | ||
982 | show &= ~0x00001000; | ||
983 | } | ||
984 | |||
985 | show = (show && nouveau_ratelimit()) ? show : 0; | ||
986 | |||
987 | if (show & 0x00100000) { | ||
988 | u32 ecode = nv_rd32(dev, 0x400110); | ||
989 | NV_INFO(dev, "PGRAPH - DATA_ERROR "); | ||
990 | nouveau_enum_print(nv50_data_error_names, ecode); | ||
991 | printk("\n"); | ||
992 | } | ||
993 | |||
994 | if (stat & 0x00200000) { | ||
995 | if (!nv50_pgraph_trap_handler(dev, show, inst, chid)) | ||
996 | show &= ~0x00200000; | ||
997 | } | ||
998 | |||
999 | nv_wr32(dev, 0x400100, stat); | ||
1000 | nv_wr32(dev, 0x400500, 0x00010001); | ||
1001 | |||
1002 | if (show) { | ||
1003 | NV_INFO(dev, "PGRAPH -"); | ||
1004 | nouveau_bitfield_print(nv50_graph_intr, show); | ||
1005 | printk("\n"); | ||
1006 | NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d " | ||
1007 | "class 0x%04x mthd 0x%04x data 0x%08x\n", | ||
1008 | chid, inst, subc, class, mthd, data); | ||
1009 | } | ||
1010 | } | ||
1011 | |||
1012 | if (nv_rd32(dev, 0x400824) & (1 << 31)) | ||
1013 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
1014 | } | ||
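
Both nv50_graph_isr() and the DISPATCH trap handler above decode a trapped-address word the same way: bits 16-18 carry the subchannel and bits 2-12 the method offset (methods are 4-byte aligned, so bits 0-1 are masked off), while the top bit flags a stuck command in the dispatch case. A standalone decode with a hypothetical address value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t addr = 0x80050204; /* hypothetical TRAPPED_ADDR value */

        uint32_t subc = (addr & 0x00070000) >> 16; /* subchannel, bits 16-18 */
        uint32_t mthd = addr & 0x00001ffc;         /* method offset, 4-byte aligned */
        int stuck = (addr & 0x80000000) != 0;      /* dispatch: a command is stuck */

        printf("subc=%u mthd=0x%04x stuck=%d\n",
               (unsigned)subc, (unsigned)mthd, stuck); /* subc=5 mthd=0x0204 stuck=1 */
        return 0;
    }
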
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index b773229b7647..adac4da98f7e 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -27,14 +27,20 @@ | |||
27 | 27 | ||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "drm.h" | 29 | #include "drm.h" |
30 | |||
30 | #include "nouveau_drv.h" | 31 | #include "nouveau_drv.h" |
32 | #include "nouveau_vm.h" | ||
33 | |||
34 | #define BAR1_VM_BASE 0x0020000000ULL | ||
35 | #define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1) | ||
36 | #define BAR3_VM_BASE 0x0000000000ULL | ||
37 | #define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3) | ||
31 | 38 | ||
32 | struct nv50_instmem_priv { | 39 | struct nv50_instmem_priv { |
33 | uint32_t save1700[5]; /* 0x1700->0x1710 */ | 40 | uint32_t save1700[5]; /* 0x1700->0x1710 */ |
34 | 41 | ||
35 | struct nouveau_gpuobj *pramin_pt; | 42 | struct nouveau_gpuobj *bar1_dmaobj; |
36 | struct nouveau_gpuobj *pramin_bar; | 43 | struct nouveau_gpuobj *bar3_dmaobj; |
37 | struct nouveau_gpuobj *fb_bar; | ||
38 | }; | 44 | }; |
39 | 45 | ||
40 | static void | 46 | static void |
@@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan) | |||
48 | return; | 54 | return; |
49 | 55 | ||
50 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | 56 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
57 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | ||
51 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | 58 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
52 | if (chan->ramin_heap.free_stack.next) | 59 | if (chan->ramin_heap.free_stack.next) |
53 | drm_mm_takedown(&chan->ramin_heap); | 60 | drm_mm_takedown(&chan->ramin_heap); |
@@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan) | |||
56 | } | 63 | } |
57 | 64 | ||
58 | static int | 65 | static int |
59 | nv50_channel_new(struct drm_device *dev, u32 size, | 66 | nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, |
60 | struct nouveau_channel **pchan) | 67 | struct nouveau_channel **pchan) |
61 | { | 68 | { |
62 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 69 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
63 | u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; | 70 | u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; |
64 | u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; | 71 | u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; |
65 | struct nouveau_channel *chan; | 72 | struct nouveau_channel *chan; |
66 | int ret; | 73 | int ret, i; |
67 | 74 | ||
68 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | 75 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
69 | if (!chan) | 76 | if (!chan) |
@@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size, | |||
92 | return ret; | 99 | return ret; |
93 | } | 100 | } |
94 | 101 | ||
102 | for (i = 0; i < 0x4000; i += 8) { | ||
103 | nv_wo32(chan->vm_pd, i + 0, 0x00000000); | ||
104 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); | ||
105 | } | ||
106 | |||
107 | ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); | ||
108 | if (ret) { | ||
109 | nv50_channel_del(&chan); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
95 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : | 113 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : |
96 | chan->ramin->pinst + fc, | 114 | chan->ramin->pinst + fc, |
97 | chan->ramin->vinst + fc, 0x100, | 115 | chan->ramin->vinst + fc, 0x100, |
@@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev) | |||
111 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 129 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
112 | struct nv50_instmem_priv *priv; | 130 | struct nv50_instmem_priv *priv; |
113 | struct nouveau_channel *chan; | 131 | struct nouveau_channel *chan; |
132 | struct nouveau_vm *vm; | ||
114 | int ret, i; | 133 | int ret, i; |
115 | u32 tmp; | 134 | u32 tmp; |
116 | 135 | ||
@@ -127,112 +146,89 @@ nv50_instmem_init(struct drm_device *dev) | |||
127 | ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); | 146 | ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); |
128 | if (ret) { | 147 | if (ret) { |
129 | NV_ERROR(dev, "Failed to init RAMIN heap\n"); | 148 | NV_ERROR(dev, "Failed to init RAMIN heap\n"); |
130 | return -ENOMEM; | 149 | goto error; |
131 | } | 150 | } |
132 | 151 | ||
133 | /* we need a channel to plug into the hw to control the BARs */ | 152 | /* BAR3 */ |
134 | ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); | 153 | ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE, |
154 | 29, 12, 16, &dev_priv->bar3_vm); | ||
135 | if (ret) | 155 | if (ret) |
136 | return ret; | 156 | goto error; |
137 | chan = dev_priv->fifos[127] = dev_priv->fifos[0]; | ||
138 | 157 | ||
139 | /* allocate page table for PRAMIN BAR */ | 158 | ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8, |
140 | ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, | 159 | 0x1000, NVOBJ_FLAG_DONT_MAP | |
141 | 0x1000, NVOBJ_FLAG_ZERO_ALLOC, | 160 | NVOBJ_FLAG_ZERO_ALLOC, |
142 | &priv->pramin_pt); | 161 | &dev_priv->bar3_vm->pgt[0].obj); |
143 | if (ret) | 162 | if (ret) |
144 | return ret; | 163 | goto error; |
164 | dev_priv->bar3_vm->pgt[0].page_shift = 12; | ||
165 | dev_priv->bar3_vm->pgt[0].refcount = 1; | ||
145 | 166 | ||
146 | nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); | 167 | nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj); |
147 | nv_wo32(chan->vm_pd, 0x0004, 0); | ||
148 | 168 | ||
149 | /* DMA object for PRAMIN BAR */ | 169 | ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan); |
150 | ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); | ||
151 | if (ret) | 170 | if (ret) |
152 | return ret; | 171 | goto error; |
153 | nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); | 172 | dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan; |
154 | nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); | 173 | |
155 | nv_wo32(priv->pramin_bar, 0x08, 0x00000000); | 174 | ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE, |
156 | nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); | 175 | NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, |
157 | nv_wo32(priv->pramin_bar, 0x10, 0x00000000); | 176 | NV_MEM_TYPE_VM, NV_MEM_COMP_VM, |
158 | nv_wo32(priv->pramin_bar, 0x14, 0x00000000); | 177 | &priv->bar3_dmaobj); |
159 | |||
160 | /* map channel into PRAMIN, gpuobj didn't do it for us */ | ||
161 | ret = nv50_instmem_bind(dev, chan->ramin); | ||
162 | if (ret) | 178 | if (ret) |
163 | return ret; | 179 | goto error; |
164 | 180 | ||
165 | /* poke regs... */ | ||
166 | nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); | 181 | nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); |
167 | nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); | 182 | nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); |
168 | nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); | 183 | nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4)); |
169 | |||
170 | tmp = nv_ri32(dev, 0); | ||
171 | nv_wi32(dev, 0, ~tmp); | ||
172 | if (nv_ri32(dev, 0) != ~tmp) { | ||
173 | NV_ERROR(dev, "PRAMIN readback failed\n"); | ||
174 | return -EIO; | ||
175 | } | ||
176 | nv_wi32(dev, 0, tmp); | ||
177 | 184 | ||
185 | dev_priv->engine.instmem.flush(dev); | ||
178 | dev_priv->ramin_available = true; | 186 | dev_priv->ramin_available = true; |
179 | 187 | ||
180 | /* Determine VM layout */ | 188 | tmp = nv_ro32(chan->ramin, 0); |
181 | dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); | 189 | nv_wo32(chan->ramin, 0, ~tmp); |
182 | dev_priv->vm_gart_size = NV50_VM_BLOCK; | 190 | if (nv_ro32(chan->ramin, 0) != ~tmp) { |
183 | 191 | NV_ERROR(dev, "PRAMIN readback failed\n"); | |
184 | dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; | 192 | ret = -EIO; |
185 | dev_priv->vm_vram_size = dev_priv->vram_size; | 193 | goto error; |
186 | if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) | ||
187 | dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; | ||
188 | dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); | ||
189 | dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK; | ||
190 | |||
191 | dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size; | ||
192 | |||
193 | NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n", | ||
194 | dev_priv->vm_gart_base, | ||
195 | dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1); | ||
196 | NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n", | ||
197 | dev_priv->vm_vram_base, | ||
198 | dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1); | ||
199 | |||
200 | /* VRAM page table(s), mapped into VM at +1GiB */ | ||
201 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | ||
202 | ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8, | ||
203 | 0, NVOBJ_FLAG_ZERO_ALLOC, | ||
204 | &chan->vm_vram_pt[i]); | ||
205 | if (ret) { | ||
206 | NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret); | ||
207 | dev_priv->vm_vram_pt_nr = i; | ||
208 | return ret; | ||
209 | } | ||
210 | dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; | ||
211 | |||
212 | nv_wo32(chan->vm_pd, 0x10 + (i*8), | ||
213 | chan->vm_vram_pt[i]->vinst | 0x61); | ||
214 | nv_wo32(chan->vm_pd, 0x14 + (i*8), 0); | ||
215 | } | 194 | } |
195 | nv_wo32(chan->ramin, 0, tmp); | ||
216 | 196 | ||
217 | /* DMA object for FB BAR */ | 197 | /* BAR1 */ |
218 | ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); | 198 | ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, |
199 | 29, 12, 16, &vm); | ||
219 | if (ret) | 200 | if (ret) |
220 | return ret; | 201 | goto error; |
221 | nv_wo32(priv->fb_bar, 0x00, 0x7fc00000); | ||
222 | nv_wo32(priv->fb_bar, 0x04, 0x40000000 + | ||
223 | pci_resource_len(dev->pdev, 1) - 1); | ||
224 | nv_wo32(priv->fb_bar, 0x08, 0x40000000); | ||
225 | nv_wo32(priv->fb_bar, 0x0c, 0x00000000); | ||
226 | nv_wo32(priv->fb_bar, 0x10, 0x00000000); | ||
227 | nv_wo32(priv->fb_bar, 0x14, 0x00000000); | ||
228 | 202 | ||
229 | dev_priv->engine.instmem.flush(dev); | 203 | ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd); |
204 | if (ret) | ||
205 | goto error; | ||
206 | nouveau_vm_ref(NULL, &vm, NULL); | ||
207 | |||
208 | ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE, | ||
209 | NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, | ||
210 | NV_MEM_TYPE_VM, NV_MEM_COMP_VM, | ||
211 | &priv->bar1_dmaobj); | ||
212 | if (ret) | ||
213 | goto error; | ||
230 | 214 | ||
231 | nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); | 215 | nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4)); |
232 | for (i = 0; i < 8; i++) | 216 | for (i = 0; i < 8; i++) |
233 | nv_wr32(dev, 0x1900 + (i*4), 0); | 217 | nv_wr32(dev, 0x1900 + (i*4), 0); |
234 | 218 | ||
219 | /* Create shared channel VM, space is reserved at the beginning | ||
220 | * to catch "NULL pointer" references | ||
221 | */ | ||
222 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, | ||
223 | 29, 12, 16, &dev_priv->chan_vm); | ||
224 | if (ret) | ||
225 | return ret; | ||
226 | |||
235 | return 0; | 227 | return 0; |
228 | |||
229 | error: | ||
230 | nv50_instmem_takedown(dev); | ||
231 | return ret; | ||
236 | } | 232 | } |
237 | 233 | ||
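The new-side code above replaces the raw nv_ri32()/nv_wi32() poke with a readback test through chan->ramin: read word 0, write its complement, verify the complement reads back, then restore the original. A minimal standalone sketch of the same write/invert/verify pattern, with a plain array standing in for PRAMIN (ro32/wo32 are made-up stand-ins for nv_ro32/nv_wo32, not the driver API):

#include <stdint.h>
#include <stdio.h>

static uint32_t instmem[16];                    /* stand-in for PRAMIN */

static uint32_t ro32(uint32_t off)             { return instmem[off / 4]; }
static void     wo32(uint32_t off, uint32_t v) { instmem[off / 4] = v; }

/* Write the complement of word 0, check it reads back, restore after. */
static int readback_ok(void)
{
	uint32_t tmp = ro32(0);

	wo32(0, ~tmp);
	if (ro32(0) != ~tmp)
		return 0;                       /* -EIO in the driver */
	wo32(0, tmp);
	return 1;
}

int main(void)
{
	printf("PRAMIN readback %s\n", readback_ok() ? "ok" : "failed");
	return 0;
}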
238 | void | 234 | void |
@@ -240,7 +236,7 @@ nv50_instmem_takedown(struct drm_device *dev) | |||
240 | { | 236 | { |
241 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 237 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
242 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 238 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
243 | struct nouveau_channel *chan = dev_priv->fifos[0]; | 239 | struct nouveau_channel *chan = dev_priv->channels.ptr[0]; |
244 | int i; | 240 | int i; |
245 | 241 | ||
246 | NV_DEBUG(dev, "\n"); | 242 | NV_DEBUG(dev, "\n"); |
@@ -250,23 +246,23 @@ nv50_instmem_takedown(struct drm_device *dev) | |||
250 | 246 | ||
251 | dev_priv->ramin_available = false; | 247 | dev_priv->ramin_available = false; |
252 | 248 | ||
253 | /* Restore state from before init */ | 249 | nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); |
250 | |||
254 | for (i = 0x1700; i <= 0x1710; i += 4) | 251 | for (i = 0x1700; i <= 0x1710; i += 4) |
255 | nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); | 252 | nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); |
256 | 253 | ||
257 | nouveau_gpuobj_ref(NULL, &priv->fb_bar); | 254 | nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj); |
258 | nouveau_gpuobj_ref(NULL, &priv->pramin_bar); | 255 | nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj); |
259 | nouveau_gpuobj_ref(NULL, &priv->pramin_pt); | ||
260 | 256 | ||
261 | /* Destroy dummy channel */ | 257 | nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd); |
262 | if (chan) { | 258 | dev_priv->channels.ptr[127] = 0; |
263 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) | 259 | nv50_channel_del(&dev_priv->channels.ptr[0]); |
264 | nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); | ||
265 | dev_priv->vm_vram_pt_nr = 0; | ||
266 | 260 | ||
267 | nv50_channel_del(&dev_priv->fifos[0]); | 261 | nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj); |
268 | dev_priv->fifos[127] = NULL; | 262 | nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); |
269 | } | 263 | |
264 | if (dev_priv->ramin_heap.free_stack.next) | ||
265 | drm_mm_takedown(&dev_priv->ramin_heap); | ||
270 | 266 | ||
271 | dev_priv->engine.instmem.priv = NULL; | 267 | dev_priv->engine.instmem.priv = NULL; |
272 | kfree(priv); | 268 | kfree(priv); |
@@ -276,16 +272,8 @@ int | |||
276 | nv50_instmem_suspend(struct drm_device *dev) | 272 | nv50_instmem_suspend(struct drm_device *dev) |
277 | { | 273 | { |
278 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 274 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
279 | struct nouveau_channel *chan = dev_priv->fifos[0]; | ||
280 | struct nouveau_gpuobj *ramin = chan->ramin; | ||
281 | int i; | ||
282 | 275 | ||
283 | ramin->im_backing_suspend = vmalloc(ramin->size); | 276 | dev_priv->ramin_available = false; |
284 | if (!ramin->im_backing_suspend) | ||
285 | return -ENOMEM; | ||
286 | |||
287 | for (i = 0; i < ramin->size; i += 4) | ||
288 | ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); | ||
289 | return 0; | 277 | return 0; |
290 | } | 278 | } |
291 | 279 | ||
@@ -294,146 +282,121 @@ nv50_instmem_resume(struct drm_device *dev) | |||
294 | { | 282 | { |
295 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 283 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
296 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 284 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
297 | struct nouveau_channel *chan = dev_priv->fifos[0]; | 285 | struct nouveau_channel *chan = dev_priv->channels.ptr[0]; |
298 | struct nouveau_gpuobj *ramin = chan->ramin; | ||
299 | int i; | 286 | int i; |
300 | 287 | ||
301 | dev_priv->ramin_available = false; | ||
302 | dev_priv->ramin_base = ~0; | ||
303 | for (i = 0; i < ramin->size; i += 4) | ||
304 | nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]); | ||
305 | dev_priv->ramin_available = true; | ||
306 | vfree(ramin->im_backing_suspend); | ||
307 | ramin->im_backing_suspend = NULL; | ||
308 | |||
309 | /* Poke the relevant regs, and pray it works :) */ | 288 | /* Poke the relevant regs, and pray it works :) */ |
310 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); | 289 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); |
311 | nv_wr32(dev, NV50_PUNK_UNK1710, 0); | 290 | nv_wr32(dev, NV50_PUNK_UNK1710, 0); |
312 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | | 291 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | |
313 | NV50_PUNK_BAR_CFG_BASE_VALID); | 292 | NV50_PUNK_BAR_CFG_BASE_VALID); |
314 | nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | | 293 | nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) | |
315 | NV50_PUNK_BAR1_CTXDMA_VALID); | 294 | NV50_PUNK_BAR1_CTXDMA_VALID); |
316 | nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | | 295 | nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) | |
317 | NV50_PUNK_BAR3_CTXDMA_VALID); | 296 | NV50_PUNK_BAR3_CTXDMA_VALID); |
318 | 297 | ||
319 | for (i = 0; i < 8; i++) | 298 | for (i = 0; i < 8; i++) |
320 | nv_wr32(dev, 0x1900 + (i*4), 0); | 299 | nv_wr32(dev, 0x1900 + (i*4), 0); |
300 | |||
301 | dev_priv->ramin_available = true; | ||
321 | } | 302 | } |
322 | 303 | ||
304 | struct nv50_gpuobj_node { | ||
305 | struct nouveau_vram *vram; | ||
306 | struct nouveau_vma chan_vma; | ||
307 | u32 align; | ||
308 | }; | ||
309 | |||
310 | |||
323 | int | 311 | int |
324 | nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, | 312 | nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) |
325 | uint32_t *sz) | ||
326 | { | 313 | { |
314 | struct drm_device *dev = gpuobj->dev; | ||
315 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
316 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
317 | struct nv50_gpuobj_node *node = NULL; | ||
327 | int ret; | 318 | int ret; |
328 | 319 | ||
329 | if (gpuobj->im_backing) | 320 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
330 | return -EINVAL; | 321 | if (!node) |
322 | return -ENOMEM; | ||
323 | node->align = align; | ||
331 | 324 | ||
332 | *sz = ALIGN(*sz, 4096); | 325 | size = (size + 4095) & ~4095; |
333 | if (*sz == 0) | 326 | align = max(align, (u32)4096); |
334 | return -EINVAL; | ||
335 | 327 | ||
336 | ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, | 328 | ret = vram->get(dev, size, align, 0, 0, &node->vram); |
337 | true, false, &gpuobj->im_backing); | ||
338 | if (ret) { | 329 | if (ret) { |
339 | NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); | 330 | kfree(node); |
340 | return ret; | 331 | return ret; |
341 | } | 332 | } |
342 | 333 | ||
343 | ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); | 334 | gpuobj->vinst = node->vram->offset; |
344 | if (ret) { | 335 | |
345 | NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); | 336 | if (gpuobj->flags & NVOBJ_FLAG_VM) { |
346 | nouveau_bo_ref(NULL, &gpuobj->im_backing); | 337 | ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, |
347 | return ret; | 338 | NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, |
339 | &node->chan_vma); | ||
340 | if (ret) { | ||
341 | vram->put(dev, &node->vram); | ||
342 | kfree(node); | ||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | nouveau_vm_map(&node->chan_vma, node->vram); | ||
347 | gpuobj->vinst = node->chan_vma.offset; | ||
348 | } | 348 | } |
349 | 349 | ||
350 | gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; | 350 | gpuobj->size = size; |
351 | gpuobj->node = node; | ||
351 | return 0; | 352 | return 0; |
352 | } | 353 | } |
353 | 354 | ||
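nv50_instmem_get() rounds the request up to a whole 4KiB page with (size + 4095) & ~4095 and clamps the alignment to at least 4096 before asking the VRAM engine for backing memory. A tiny sketch of that arithmetic (pure computation, input values picked arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 0x2340, align = 0x100;

	size  = (size + 4095) & ~4095u;      /* round up to a 4KiB multiple */
	align = align > 4096 ? align : 4096; /* max(align, 4096) */

	printf("size=0x%x align=0x%x\n", size, align); /* 0x3000, 0x1000 */
	return 0;
}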
354 | void | 355 | void |
355 | nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 356 | nv50_instmem_put(struct nouveau_gpuobj *gpuobj) |
356 | { | 357 | { |
358 | struct drm_device *dev = gpuobj->dev; | ||
357 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 359 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
360 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
361 | struct nv50_gpuobj_node *node; | ||
362 | |||
363 | node = gpuobj->node; | ||
364 | gpuobj->node = NULL; | ||
358 | 365 | ||
359 | if (gpuobj && gpuobj->im_backing) { | 366 | if (node->chan_vma.node) { |
360 | if (gpuobj->im_bound) | 367 | nouveau_vm_unmap(&node->chan_vma); |
361 | dev_priv->engine.instmem.unbind(dev, gpuobj); | 368 | nouveau_vm_put(&node->chan_vma); |
362 | nouveau_bo_unpin(gpuobj->im_backing); | ||
363 | nouveau_bo_ref(NULL, &gpuobj->im_backing); | ||
364 | gpuobj->im_backing = NULL; | ||
365 | } | 369 | } |
370 | vram->put(dev, &node->vram); | ||
371 | kfree(node); | ||
366 | } | 372 | } |
367 | 373 | ||
368 | int | 374 | int |
369 | nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 375 | nv50_instmem_map(struct nouveau_gpuobj *gpuobj) |
370 | { | 376 | { |
371 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 377 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; |
372 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 378 | struct nv50_gpuobj_node *node = gpuobj->node; |
373 | struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; | 379 | int ret; |
374 | uint32_t pte, pte_end; | ||
375 | uint64_t vram; | ||
376 | |||
377 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) | ||
378 | return -EINVAL; | ||
379 | |||
380 | NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", | ||
381 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); | ||
382 | |||
383 | pte = (gpuobj->im_pramin->start >> 12) << 1; | ||
384 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; | ||
385 | vram = gpuobj->vinst; | ||
386 | |||
387 | NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", | ||
388 | gpuobj->im_pramin->start, pte, pte_end); | ||
389 | NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); | ||
390 | |||
391 | vram |= 1; | ||
392 | if (dev_priv->vram_sys_base) { | ||
393 | vram += dev_priv->vram_sys_base; | ||
394 | vram |= 0x30; | ||
395 | } | ||
396 | |||
397 | while (pte < pte_end) { | ||
398 | nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); | ||
399 | nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); | ||
400 | vram += 0x1000; | ||
401 | pte += 2; | ||
402 | } | ||
403 | dev_priv->engine.instmem.flush(dev); | ||
404 | 380 | ||
405 | nv50_vm_flush(dev, 6); | 381 | ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, |
382 | NV_MEM_ACCESS_RW, &node->vram->bar_vma); | ||
383 | if (ret) | ||
384 | return ret; | ||
406 | 385 | ||
407 | gpuobj->im_bound = 1; | 386 | nouveau_vm_map(&node->vram->bar_vma, node->vram); |
387 | gpuobj->pinst = node->vram->bar_vma.offset; | ||
408 | return 0; | 388 | return 0; |
409 | } | 389 | } |
410 | 390 | ||
411 | int | 391 | void |
412 | nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 392 | nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) |
413 | { | 393 | { |
414 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 394 | struct nv50_gpuobj_node *node = gpuobj->node; |
415 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | ||
416 | uint32_t pte, pte_end; | ||
417 | |||
418 | if (gpuobj->im_bound == 0) | ||
419 | return -EINVAL; | ||
420 | |||
421 | /* can happen during late takedown */ | ||
422 | if (unlikely(!dev_priv->ramin_available)) | ||
423 | return 0; | ||
424 | 395 | ||
425 | pte = (gpuobj->im_pramin->start >> 12) << 1; | 396 | if (node->vram->bar_vma.node) { |
426 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; | 397 | nouveau_vm_unmap(&node->vram->bar_vma); |
427 | 398 | nouveau_vm_put(&node->vram->bar_vma); | |
428 | while (pte < pte_end) { | ||
429 | nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); | ||
430 | nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); | ||
431 | pte += 2; | ||
432 | } | 399 | } |
433 | dev_priv->engine.instmem.flush(dev); | ||
434 | |||
435 | gpuobj->im_bound = 0; | ||
436 | return 0; | ||
437 | } | 400 | } |
438 | 401 | ||
439 | void | 402 | void |
@@ -452,11 +415,3 @@ nv84_instmem_flush(struct drm_device *dev) | |||
452 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 415 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
453 | } | 416 | } |
454 | 417 | ||
455 | void | ||
456 | nv50_vm_flush(struct drm_device *dev, int engine) | ||
457 | { | ||
458 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | ||
459 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | ||
460 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | ||
461 | } | ||
462 | |||
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c new file mode 100644 index 000000000000..eebab95f59b2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
30 | void | ||
31 | nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, | ||
32 | struct nouveau_gpuobj *pgt) | ||
33 | { | ||
34 | struct drm_nouveau_private *dev_priv = pgd->dev->dev_private; | ||
35 | u32 coverage = (pgt->size >> 3) << type; | ||
36 | u64 phys; | ||
37 | |||
38 | phys = pgt->vinst; | ||
39 | phys |= 0x01; /* present */ | ||
40 | phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */ | ||
41 | if (dev_priv->vram_sys_base) { | ||
42 | phys += dev_priv->vram_sys_base; | ||
43 | phys |= 0x30; | ||
44 | } | ||
45 | |||
46 | if (coverage <= 32 * 1024 * 1024) | ||
47 | phys |= 0x60; | ||
48 | else if (coverage <= 64 * 1024 * 1024) | ||
49 | phys |= 0x40; | ||
50 | else if (coverage < 128 * 1024 * 1024) | ||
51 | phys |= 0x20; | ||
52 | |||
53 | nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); | ||
54 | nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); | ||
55 | } | ||
56 | |||
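nv50_vm_map_pgt() above packs a page-directory entry: bit 0 marks it present, bit 1 selects 4KiB page tables (type 12), the vram_sys_base path retargets IGP instmem into stolen system memory, and the coverage comparisons set extra flag bits depending on how much of the PDE's span the table covers. A standalone mirror of that packing (pde_encode is a made-up helper name; the constants are copied from the code above):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the PDE packing in nv50_vm_map_pgt(); vram_sys_base is
 * nonzero only on IGPs backed by stolen system memory.
 */
static uint64_t pde_encode(uint64_t pgt_vinst, uint32_t pgt_size,
			   uint32_t type, uint64_t vram_sys_base)
{
	uint32_t coverage = (pgt_size >> 3) << type;
	uint64_t phys = pgt_vinst;

	phys |= 0x01;				/* present */
	phys |= (type == 12) ? 0x02 : 0x00;	/* 4KiB pages */
	if (vram_sys_base) {
		phys += vram_sys_base;
		phys |= 0x30;
	}

	if (coverage <= 32 * 1024 * 1024)
		phys |= 0x60;
	else if (coverage <= 64 * 1024 * 1024)
		phys |= 0x40;
	else if (coverage < 128 * 1024 * 1024)
		phys |= 0x20;

	return phys;
}

int main(void)
{
	/* 8192-entry table of 4KiB pages -> covers 32MiB -> 0x10063 */
	printf("pde = 0x%016llx\n",
	       (unsigned long long)pde_encode(0x10000, 8192 * 8, 12, 0));
	return 0;
}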
57 | void | ||
58 | nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde) | ||
59 | { | ||
60 | nv_wo32(pgd, (pde * 8) + 0, 0x00000000); | ||
61 | nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe); | ||
62 | } | ||
63 | |||
64 | static inline u64 | ||
65 | nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | ||
66 | u64 phys, u32 memtype, u32 target) | ||
67 | { | ||
68 | struct drm_nouveau_private *dev_priv = pgt->dev->dev_private; | ||
69 | |||
70 | phys |= 1; /* present */ | ||
71 | phys |= (u64)memtype << 40; | ||
72 | |||
73 | /* IGPs don't have real VRAM, re-target to stolen system memory */ | ||
74 | if (target == 0 && dev_priv->vram_sys_base) { | ||
75 | phys += dev_priv->vram_sys_base; | ||
76 | target = 3; | ||
77 | } | ||
78 | |||
79 | phys |= target << 4; | ||
80 | |||
81 | if (vma->access & NV_MEM_ACCESS_SYS) | ||
82 | phys |= (1 << 6); | ||
83 | |||
84 | if (!(vma->access & NV_MEM_ACCESS_WO)) | ||
85 | phys |= (1 << 3); | ||
86 | |||
87 | return phys; | ||
88 | } | ||
89 | |||
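nv50_vm_addr() builds the common part of a PTE: bit 0 present, memtype in bits 47:40, the target in bits 7:4 (rewritten to 3 for IGP stolen memory), plus bit 6 when NV_MEM_ACCESS_SYS is requested and bit 3 when the mapping is not writable. A self-contained mirror of that logic; the NV_MEM_ACCESS_* values below are assumptions for illustration (the real definitions live in nouveau_vm.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed flag values, for illustration only. */
#define NV_MEM_ACCESS_RO  1
#define NV_MEM_ACCESS_WO  2
#define NV_MEM_ACCESS_RW  3
#define NV_MEM_ACCESS_SYS 4

static uint64_t pte_encode(uint64_t phys, uint32_t memtype, uint32_t target,
			   uint32_t access, uint64_t vram_sys_base)
{
	phys |= 1;				/* present */
	phys |= (uint64_t)memtype << 40;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	if (target == 0 && vram_sys_base) {
		phys += vram_sys_base;
		target = 3;
	}
	phys |= target << 4;

	if (access & NV_MEM_ACCESS_SYS)
		phys |= 1 << 6;
	if (!(access & NV_MEM_ACCESS_WO))
		phys |= 1 << 3;			/* read-only */

	return phys;
}

int main(void)
{
	/* VRAM target, memtype 0x7a, RW -> 0x00007a0020000001 */
	printf("pte = 0x%016llx\n", (unsigned long long)
	       pte_encode(0x20000000, 0x7a, 0, NV_MEM_ACCESS_RW, 0));
	return 0;
}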
90 | void | ||
91 | nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | ||
92 | struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys) | ||
93 | { | ||
94 | u32 block, i; | ||
95 | |||
96 | phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0); | ||
97 | pte <<= 3; | ||
98 | cnt <<= 3; | ||
99 | |||
100 | while (cnt) { | ||
101 | u32 offset_h = upper_32_bits(phys); | ||
102 | u32 offset_l = lower_32_bits(phys); | ||
103 | |||
104 | for (i = 7; i >= 0; i--) { | ||
105 | block = 1 << (i + 3); | ||
106 | if (cnt >= block && !(pte & (block - 1))) | ||
107 | break; | ||
108 | } | ||
109 | offset_l |= (i << 7); | ||
110 | |||
111 | phys += block << (vma->node->type - 3); | ||
112 | cnt -= block; | ||
113 | |||
114 | while (block) { | ||
115 | nv_wo32(pgt, pte + 0, offset_l); | ||
116 | nv_wo32(pgt, pte + 4, offset_h); | ||
117 | pte += 8; | ||
118 | block -= 8; | ||
119 | } | ||
120 | } | ||
121 | } | ||
122 | |||
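The loop in nv50_vm_map() picks, per iteration, the largest power-of-two run of PTEs that both fits the remaining count and is naturally aligned at the current offset (pte and cnt are pre-shifted into bytes, 8 bytes per PTE), then records the run size i in bits 9:7 of the low PTE word so the hardware can treat the run as one block. A trace of just the selection logic, assuming the same shifting convention:

#include <stdint.h>
#include <stdio.h>

/* Largest naturally-aligned power-of-two run, as in nv50_vm_map().
 * pte and cnt are in bytes; block = 1 << (i + 3) spans 1 << i entries.
 */
static uint32_t pick_block(uint32_t pte, uint32_t cnt, int *out_i)
{
	uint32_t block = 8;
	int i;

	for (i = 7; i >= 0; i--) {
		block = 1 << (i + 3);
		if (cnt >= block && !(pte & (block - 1)))
			break;
	}
	*out_i = i;
	return block;
}

int main(void)
{
	uint32_t pte = 24 << 3, cnt = 100 << 3;	/* entry 24, 100 entries */
	int i;

	while (cnt) {
		uint32_t block = pick_block(pte, cnt, &i);
		printf("pte entry %4u: %3u entries (i=%d)\n",
		       pte >> 3, block >> 3, i);
		pte += block;
		cnt -= block;
	}
	return 0;
}

Starting at entry 24 with 100 entries this emits runs of 8, 32, 32, 16, 8 and 4: alignment limits the first runs, the remaining count limits the last ones.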
123 | void | ||
124 | nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | ||
125 | u32 pte, dma_addr_t *list, u32 cnt) | ||
126 | { | ||
127 | pte <<= 3; | ||
128 | while (cnt--) { | ||
129 | u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2); | ||
130 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); | ||
131 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); | ||
132 | pte += 8; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | void | ||
137 | nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) | ||
138 | { | ||
139 | pte <<= 3; | ||
140 | while (cnt--) { | ||
141 | nv_wo32(pgt, pte + 0, 0x00000000); | ||
142 | nv_wo32(pgt, pte + 4, 0x00000000); | ||
143 | pte += 8; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | void | ||
148 | nv50_vm_flush(struct nouveau_vm *vm) | ||
149 | { | ||
150 | struct drm_nouveau_private *dev_priv = vm->dev->dev_private; | ||
151 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | ||
152 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
153 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
154 | struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; | ||
155 | |||
156 | pinstmem->flush(vm->dev); | ||
157 | |||
158 | /* BAR */ | ||
159 | if (vm != dev_priv->chan_vm) { | ||
160 | nv50_vm_flush_engine(vm->dev, 6); | ||
161 | return; | ||
162 | } | ||
163 | |||
164 | pfifo->tlb_flush(vm->dev); | ||
165 | |||
166 | if (atomic_read(&vm->pgraph_refs)) | ||
167 | pgraph->tlb_flush(vm->dev); | ||
168 | if (atomic_read(&vm->pcrypt_refs)) | ||
169 | pcrypt->tlb_flush(vm->dev); | ||
170 | } | ||
171 | |||
172 | void | ||
173 | nv50_vm_flush_engine(struct drm_device *dev, int engine) | ||
174 | { | ||
175 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | ||
176 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | ||
177 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | ||
178 | } | ||
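nv50_vm_flush_engine() writes the engine number into bits 31:16 of register 0x100c80 with bit 0 set, then waits for the hardware to clear bit 0. A userspace-style sketch of the same write-then-poll pattern; the accessors below are fakes that model a flush completing immediately (nv_wait in the driver does the bounded polling):

#include <stdint.h>
#include <stdio.h>

static uint32_t reg_100c80;			/* fake MMIO register */

/* Fake hardware: the flush "completes" as soon as it is triggered. */
static void     wr32(uint32_t v) { reg_100c80 = v & ~1u; }
static uint32_t rd32(void)       { return reg_100c80; }

/* Trigger a flush for one engine and poll for bit 0 to clear. */
static int vm_flush_engine(int engine)
{
	int tries = 100000;

	wr32((engine << 16) | 1);
	while (rd32() & 1) {
		if (--tries == 0)
			return -1;		/* flush timeout */
	}
	return 0;
}

int main(void)
{
	printf("flush engine 6: %s\n", vm_flush_engine(6) ? "timeout" : "ok");
	return 0;
}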
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c new file mode 100644 index 000000000000..47489ed0a5a8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vram.c | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | |||
29 | static int types[0x80] = { | ||
30 | 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
31 | 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, | ||
32 | 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0, | ||
33 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
34 | 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, | ||
35 | 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
36 | 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, | ||
37 | 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 | ||
38 | }; | ||
39 | |||
40 | bool | ||
41 | nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags) | ||
42 | { | ||
43 | int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8; | ||
44 | |||
45 | if (likely(type < ARRAY_SIZE(types) && types[type])) ||
46 | return true; | ||
47 | return false; | ||
48 | } | ||
49 | |||
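Note the guard above: types[] holds 0x80 ints, so the bounds check must compare against the element count (ARRAY_SIZE), not sizeof(), which counts bytes and would let tile types 0x80-0xff index past the end of the array. A two-liner showing the difference:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int types[0x80];

int main(void)
{
	/* sizeof counts bytes (0x200 here); ARRAY_SIZE counts elements. */
	printf("sizeof=%zu ARRAY_SIZE=%zu\n", sizeof(types), ARRAY_SIZE(types));
	return 0;
}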
50 | void | ||
51 | nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram) | ||
52 | { | ||
53 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
54 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | ||
55 | struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; | ||
56 | struct nouveau_mm *mm = man->priv; | ||
57 | struct nouveau_mm_node *this; | ||
58 | struct nouveau_vram *vram; | ||
59 | |||
60 | vram = *pvram; | ||
61 | *pvram = NULL; | ||
62 | if (unlikely(vram == NULL)) | ||
63 | return; | ||
64 | |||
65 | mutex_lock(&mm->mutex); | ||
66 | while (!list_empty(&vram->regions)) { | ||
67 | this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); | ||
68 | |||
69 | list_del(&this->rl_entry); | ||
70 | nouveau_mm_put(mm, this); | ||
71 | } | ||
72 | mutex_unlock(&mm->mutex); | ||
73 | |||
74 | kfree(vram); | ||
75 | } | ||
76 | |||
77 | int | ||
78 | nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, | ||
79 | u32 type, struct nouveau_vram **pvram) | ||
80 | { | ||
81 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
82 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | ||
83 | struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; | ||
84 | struct nouveau_mm *mm = man->priv; | ||
85 | struct nouveau_mm_node *r; | ||
86 | struct nouveau_vram *vram; | ||
87 | int ret; | ||
88 | |||
89 | if (!types[type]) | ||
90 | return -EINVAL; | ||
91 | size >>= 12; | ||
92 | align >>= 12; | ||
93 | size_nc >>= 12; | ||
94 | |||
95 | vram = kzalloc(sizeof(*vram), GFP_KERNEL); | ||
96 | if (!vram) | ||
97 | return -ENOMEM; | ||
98 | |||
99 | INIT_LIST_HEAD(&vram->regions); | ||
100 | vram->dev = dev_priv->dev; | ||
101 | vram->memtype = type; | ||
102 | vram->size = size; | ||
103 | |||
104 | mutex_lock(&mm->mutex); | ||
105 | do { | ||
106 | ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r); | ||
107 | if (ret) { | ||
108 | mutex_unlock(&mm->mutex); | ||
109 | nv50_vram_del(dev, &vram); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | list_add_tail(&r->rl_entry, &vram->regions); | ||
114 | size -= r->length; | ||
115 | } while (size); | ||
116 | mutex_unlock(&mm->mutex); | ||
117 | |||
118 | r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); | ||
119 | vram->offset = (u64)r->offset << 12; | ||
120 | *pvram = vram; | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static u32 | ||
125 | nv50_vram_rblock(struct drm_device *dev) | ||
126 | { | ||
127 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
128 | int i, parts, colbits, rowbitsa, rowbitsb, banks; | ||
129 | u64 rowsize, predicted; | ||
130 | u32 r0, r4, rt, ru, rblock_size; | ||
131 | |||
132 | r0 = nv_rd32(dev, 0x100200); | ||
133 | r4 = nv_rd32(dev, 0x100204); | ||
134 | rt = nv_rd32(dev, 0x100250); | ||
135 | ru = nv_rd32(dev, 0x001540); | ||
136 | NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); | ||
137 | |||
138 | for (i = 0, parts = 0; i < 8; i++) { | ||
139 | if (ru & (0x00010000 << i)) | ||
140 | parts++; | ||
141 | } | ||
142 | |||
143 | colbits = (r4 & 0x0000f000) >> 12; | ||
144 | rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; | ||
145 | rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; | ||
146 | banks = ((r4 & 0x01000000) ? 8 : 4); | ||
147 | |||
148 | rowsize = parts * banks * (1 << colbits) * 8; | ||
149 | predicted = rowsize << rowbitsa; | ||
150 | if (r0 & 0x00000004) | ||
151 | predicted += rowsize << rowbitsb; | ||
152 | |||
153 | if (predicted != dev_priv->vram_size) { | ||
154 | NV_WARN(dev, "memory controller reports %dMiB VRAM\n", | ||
155 | (u32)(dev_priv->vram_size >> 20)); | ||
156 | NV_WARN(dev, "we calculated %dMiB VRAM\n", | ||
157 | (u32)(predicted >> 20)); | ||
158 | } | ||
159 | |||
160 | rblock_size = rowsize; | ||
161 | if (rt & 1) | ||
162 | rblock_size *= 3; | ||
163 | |||
164 | NV_DEBUG(dev, "rblock %d bytes\n", rblock_size); | ||
165 | return rblock_size; | ||
166 | } | ||
167 | |||
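nv50_vram_rblock() recovers the row size from the memory-controller config: partition count from bits 23:16 of 0x001540, column/row/bank geometry from 0x100204, rowsize = parts * banks * 2^colbits * 8 bytes, a cross-check of the predicted total against the reported VRAM size, and a 3x row block when bit 0 of 0x100250 is set. A worked run of the same arithmetic with hypothetical register values (made up purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical register contents, for illustration only. */
	uint32_t r0 = 0x00000004, r4 = 0x00221000, ru = 0x00ff0000, rt = 0;
	int i, parts = 0;

	for (i = 0; i < 8; i++)			/* count enabled partitions */
		if (ru & (0x00010000 << i))
			parts++;

	int colbits  = (r4 & 0x0000f000) >> 12;
	int rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	int rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	int banks    = (r4 & 0x01000000) ? 8 : 4;

	uint64_t rowsize   = (uint64_t)parts * banks * (1 << colbits) * 8;
	uint64_t predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	uint32_t rblock = (uint32_t)rowsize * ((rt & 1) ? 3 : 1);

	printf("parts=%d banks=%d colbits=%d rowbits=%d/%d -> "
	       "rowsize=%llu predicted=%lluMiB rblock=%u\n",
	       parts, banks, colbits, rowbitsa, rowbitsb,
	       (unsigned long long)rowsize,
	       (unsigned long long)(predicted >> 20), rblock);
	return 0;
}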
168 | int | ||
169 | nv50_vram_init(struct drm_device *dev) | ||
170 | { | ||
171 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
172 | |||
173 | dev_priv->vram_size = nv_rd32(dev, 0x10020c); | ||
174 | dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; | ||
175 | dev_priv->vram_size &= 0xffffffff00ULL; | ||
176 | |||
177 | switch (dev_priv->chipset) { | ||
178 | case 0xaa: | ||
179 | case 0xac: | ||
180 | case 0xaf: | ||
181 | dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; | ||
182 | dev_priv->vram_rblock_size = 4096; | ||
183 | break; | ||
184 | default: | ||
185 | dev_priv->vram_rblock_size = nv50_vram_rblock(dev); | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | return 0; | ||
190 | } | ||
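nv50_vram_init() decodes 0x10020c: the register's low byte carries bits 39:32 of the VRAM size while the upper bytes carry bits 31:8, so the code replicates the low byte up to bits 39:32 and then masks it out of the low word. A sketch of the decode:

#include <stdint.h>
#include <stdio.h>

/* Decode 0x10020c as in nv50_vram_init(). */
static uint64_t decode_vram_size(uint32_t reg)
{
	uint64_t size = reg;

	size |= (size & 0xff) << 32;	/* move low byte up to bits 39:32 */
	size &= 0xffffffff00ULL;	/* and clear it from the low bits */
	return size;
}

int main(void)
{
	/* e.g. 0x40000001 -> 0x0140000000 = 5GiB */
	printf("vram = %lluMiB\n",
	       (unsigned long long)(decode_vram_size(0x40000001) >> 20));
	return 0;
}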
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c new file mode 100644 index 000000000000..ec18ae1c3886 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_util.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
30 | static void nv84_crypt_isr(struct drm_device *); | ||
31 | |||
32 | int | ||
33 | nv84_crypt_create_context(struct nouveau_channel *chan) | ||
34 | { | ||
35 | struct drm_device *dev = chan->dev; | ||
36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
37 | struct nouveau_gpuobj *ramin = chan->ramin; | ||
38 | int ret; | ||
39 | |||
40 | NV_DEBUG(dev, "ch%d\n", chan->id); | ||
41 | |||
42 | ret = nouveau_gpuobj_new(dev, chan, 256, 0, | ||
43 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, | ||
44 | &chan->crypt_ctx); | ||
45 | if (ret) | ||
46 | return ret; | ||
47 | |||
48 | nv_wo32(ramin, 0xa0, 0x00190000); | ||
49 | nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); | ||
50 | nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); | ||
51 | nv_wo32(ramin, 0xac, 0); | ||
52 | nv_wo32(ramin, 0xb0, 0); | ||
53 | nv_wo32(ramin, 0xb4, 0); | ||
54 | |||
55 | dev_priv->engine.instmem.flush(dev); | ||
56 | atomic_inc(&chan->vm->pcrypt_refs); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | void | ||
61 | nv84_crypt_destroy_context(struct nouveau_channel *chan) | ||
62 | { | ||
63 | struct drm_device *dev = chan->dev; | ||
64 | u32 inst; | ||
65 | |||
66 | if (!chan->crypt_ctx) | ||
67 | return; | ||
68 | |||
69 | inst = (chan->ramin->vinst >> 12); | ||
70 | inst |= 0x80000000; | ||
71 | |||
72 | /* mark context as invalid if still on the hardware, not | ||
73 | * doing this causes issues the next time PCRYPT is used, | ||
74 | * unsurprisingly :) | ||
75 | */ | ||
76 | nv_wr32(dev, 0x10200c, 0x00000000); | ||
77 | if (nv_rd32(dev, 0x102188) == inst) | ||
78 | nv_mask(dev, 0x102188, 0x80000000, 0x00000000); | ||
79 | if (nv_rd32(dev, 0x10218c) == inst) | ||
80 | nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); | ||
81 | nv_wr32(dev, 0x10200c, 0x00000010); | ||
82 | |||
83 | nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); | ||
84 | atomic_dec(&chan->vm->pcrypt_refs); | ||
85 | } | ||
86 | |||
87 | void | ||
88 | nv84_crypt_tlb_flush(struct drm_device *dev) | ||
89 | { | ||
90 | nv50_vm_flush_engine(dev, 0x0a); | ||
91 | } | ||
92 | |||
93 | int | ||
94 | nv84_crypt_init(struct drm_device *dev) | ||
95 | { | ||
96 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
97 | struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; | ||
98 | |||
99 | if (!pcrypt->registered) { | ||
100 | NVOBJ_CLASS(dev, 0x74c1, CRYPT); | ||
101 | pcrypt->registered = true; | ||
102 | } | ||
103 | |||
104 | nv_mask(dev, 0x000200, 0x00004000, 0x00000000); | ||
105 | nv_mask(dev, 0x000200, 0x00004000, 0x00004000); | ||
106 | |||
107 | nouveau_irq_register(dev, 14, nv84_crypt_isr); | ||
108 | nv_wr32(dev, 0x102130, 0xffffffff); | ||
109 | nv_wr32(dev, 0x102140, 0xffffffbf); | ||
110 | |||
111 | nv_wr32(dev, 0x10200c, 0x00000010); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | void | ||
116 | nv84_crypt_fini(struct drm_device *dev) | ||
117 | { | ||
118 | nv_wr32(dev, 0x102140, 0x00000000); | ||
119 | nouveau_irq_unregister(dev, 14); | ||
120 | } | ||
121 | |||
122 | static void | ||
123 | nv84_crypt_isr(struct drm_device *dev) | ||
124 | { | ||
125 | u32 stat = nv_rd32(dev, 0x102130); | ||
126 | u32 mthd = nv_rd32(dev, 0x102190); | ||
127 | u32 data = nv_rd32(dev, 0x102194); | ||
128 | u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; | ||
129 | int show = nouveau_ratelimit(); | ||
130 | |||
131 | if (show) { | ||
132 | NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
133 | stat, mthd, data, inst); | ||
134 | } | ||
135 | |||
136 | nv_wr32(dev, 0x102130, stat); | ||
137 | nv_wr32(dev, 0x10200c, 0x10); | ||
138 | |||
139 | nv50_fb_vm_trap(dev, show, "PCRYPT"); | ||
140 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 13a0f78a9088..39232085193d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c | |||
@@ -26,67 +26,89 @@ | |||
26 | 26 | ||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | 28 | ||
29 | struct nvc0_gpuobj_node { | ||
30 | struct nouveau_bo *vram; | ||
31 | struct drm_mm_node *ramin; | ||
32 | u32 align; | ||
33 | }; | ||
34 | |||
29 | int | 35 | int |
30 | nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, | 36 | nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) |
31 | uint32_t *size) | ||
32 | { | 37 | { |
38 | struct drm_device *dev = gpuobj->dev; | ||
39 | struct nvc0_gpuobj_node *node = NULL; | ||
33 | int ret; | 40 | int ret; |
34 | 41 | ||
35 | *size = ALIGN(*size, 4096); | 42 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
36 | if (*size == 0) | 43 | if (!node) |
37 | return -EINVAL; | 44 | return -ENOMEM; |
45 | node->align = align; | ||
38 | 46 | ||
39 | ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, | 47 | ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, |
40 | true, false, &gpuobj->im_backing); | 48 | 0, 0x0000, true, false, &node->vram); |
41 | if (ret) { | 49 | if (ret) { |
42 | NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); | 50 | NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); |
43 | return ret; | 51 | return ret; |
44 | } | 52 | } |
45 | 53 | ||
46 | ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); | 54 | ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); |
47 | if (ret) { | 55 | if (ret) { |
48 | NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); | 56 | NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); |
49 | nouveau_bo_ref(NULL, &gpuobj->im_backing); | 57 | nouveau_bo_ref(NULL, &node->vram); |
50 | return ret; | 58 | return ret; |
51 | } | 59 | } |
52 | 60 | ||
53 | gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; | 61 | gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; |
62 | gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; | ||
63 | gpuobj->node = node; | ||
54 | return 0; | 64 | return 0; |
55 | } | 65 | } |
56 | 66 | ||
57 | void | 67 | void |
58 | nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 68 | nvc0_instmem_put(struct nouveau_gpuobj *gpuobj) |
59 | { | 69 | { |
60 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 70 | struct nvc0_gpuobj_node *node; |
61 | 71 | ||
62 | if (gpuobj && gpuobj->im_backing) { | 72 | node = gpuobj->node; |
63 | if (gpuobj->im_bound) | 73 | gpuobj->node = NULL; |
64 | dev_priv->engine.instmem.unbind(dev, gpuobj); | 74 | |
65 | nouveau_bo_unpin(gpuobj->im_backing); | 75 | nouveau_bo_unpin(node->vram); |
66 | nouveau_bo_ref(NULL, &gpuobj->im_backing); | 76 | nouveau_bo_ref(NULL, &node->vram); |
67 | gpuobj->im_backing = NULL; | 77 | kfree(node); |
68 | } | ||
69 | } | 78 | } |
70 | 79 | ||
71 | int | 80 | int |
72 | nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 81 | nvc0_instmem_map(struct nouveau_gpuobj *gpuobj) |
73 | { | 82 | { |
74 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 83 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; |
75 | uint32_t pte, pte_end; | 84 | struct nvc0_gpuobj_node *node = gpuobj->node; |
76 | uint64_t vram; | 85 | struct drm_device *dev = gpuobj->dev; |
77 | 86 | struct drm_mm_node *ramin = NULL; | |
78 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) | 87 | u32 pte, pte_end; |
79 | return -EINVAL; | 88 | u64 vram; |
80 | 89 | ||
81 | NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", | 90 | do { |
82 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); | 91 | if (drm_mm_pre_get(&dev_priv->ramin_heap)) |
83 | 92 | return -ENOMEM; | |
84 | pte = gpuobj->im_pramin->start >> 12; | 93 | |
85 | pte_end = (gpuobj->im_pramin->size >> 12) + pte; | 94 | spin_lock(&dev_priv->ramin_lock); |
95 | ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, | ||
96 | node->align, 0); | ||
97 | if (ramin == NULL) { | ||
98 | spin_unlock(&dev_priv->ramin_lock); | ||
99 | return -ENOMEM; | ||
100 | } | ||
101 | |||
102 | ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); | ||
103 | spin_unlock(&dev_priv->ramin_lock); | ||
104 | } while (ramin == NULL); | ||
105 | |||
106 | pte = (ramin->start >> 12) << 1; | ||
107 | pte_end = ((ramin->size >> 12) << 1) + pte; | ||
86 | vram = gpuobj->vinst; | 108 | vram = gpuobj->vinst; |
87 | 109 | ||
88 | NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", | 110 | NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", |
89 | gpuobj->im_pramin->start, pte, pte_end); | 111 | ramin->start, pte, pte_end); |
90 | NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); | 112 | NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); |
91 | 113 | ||
92 | while (pte < pte_end) { | 114 | while (pte < pte_end) { |
@@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
103 | nv_wr32(dev, 0x100cbc, 0x80000005); | 125 | nv_wr32(dev, 0x100cbc, 0x80000005); |
104 | } | 126 | } |
105 | 127 | ||
106 | gpuobj->im_bound = 1; | 128 | node->ramin = ramin; |
129 | gpuobj->pinst = ramin->start; | ||
107 | return 0; | 130 | return 0; |
108 | } | 131 | } |
109 | 132 | ||
110 | int | 133 | void |
111 | nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | 134 | nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj) |
112 | { | 135 | { |
113 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 136 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; |
114 | uint32_t pte, pte_end; | 137 | struct nvc0_gpuobj_node *node = gpuobj->node; |
138 | u32 pte, pte_end; | ||
115 | 139 | ||
116 | if (gpuobj->im_bound == 0) | 140 | if (!node->ramin || !dev_priv->ramin_available) |
117 | return -EINVAL; | 141 | return; |
142 | |||
143 | pte = (node->ramin->start >> 12) << 1; | ||
144 | pte_end = ((node->ramin->size >> 12) << 1) + pte; | ||
118 | 145 | ||
119 | pte = gpuobj->im_pramin->start >> 12; | ||
120 | pte_end = (gpuobj->im_pramin->size >> 12) + pte; | ||
121 | while (pte < pte_end) { | 146 | while (pte < pte_end) { |
122 | nv_wr32(dev, 0x702000 + (pte * 8), 0); | 147 | nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0); |
123 | nv_wr32(dev, 0x702004 + (pte * 8), 0); | 148 | nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0); |
124 | pte++; | 149 | pte++; |
125 | } | 150 | } |
126 | dev_priv->engine.instmem.flush(dev); | 151 | dev_priv->engine.instmem.flush(gpuobj->dev); |
127 | 152 | ||
128 | gpuobj->im_bound = 0; | 153 | spin_lock(&dev_priv->ramin_lock); |
129 | return 0; | 154 | drm_mm_put_block(node->ramin); |
155 | node->ramin = NULL; | ||
156 | spin_unlock(&dev_priv->ramin_lock); | ||
130 | } | 157 | } |
131 | 158 | ||
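nvc0_instmem_map()/unmap() derive the BAR page-table slot range from the drm_mm node with (start >> 12) << 1, i.e. two 8-byte slots per 4KiB page, written at 0x702000/0x702004 plus an 8-byte stride. A small sketch of that index arithmetic (the start/size values are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A hypothetical RAMIN node: 3 pages starting at byte 0x8000. */
	uint64_t start = 0x8000, size = 0x3000;

	/* As in nvc0_instmem_map(): two 8-byte PTE slots per 4KiB page. */
	uint32_t pte     = (uint32_t)(start >> 12) << 1;
	uint32_t pte_end = ((uint32_t)(size >> 12) << 1) + pte;

	for (; pte < pte_end; pte++)
		printf("slot %u -> regs 0x%06x/0x%06x\n",
		       pte, 0x702000 + pte * 8, 0x702004 + pte * 8);
	return 0;
}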
132 | void | 159 | void |
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h index 881f8a585613..fe0f253089ac 100644 --- a/drivers/gpu/drm/nouveau/nvreg.h +++ b/drivers/gpu/drm/nouveau/nvreg.h | |||
@@ -153,7 +153,8 @@ | |||
153 | #define NV_PCRTC_START 0x00600800 | 153 | #define NV_PCRTC_START 0x00600800 |
154 | #define NV_PCRTC_CONFIG 0x00600804 | 154 | #define NV_PCRTC_CONFIG 0x00600804 |
155 | # define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) | 155 | # define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) |
156 | # define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) | 156 | # define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0) |
157 | # define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) | ||
157 | #define NV_PCRTC_CURSOR_CONFIG 0x00600810 | 158 | #define NV_PCRTC_CURSOR_CONFIG 0x00600810 |
158 | # define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) | 159 | # define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) |
159 | # define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) | 160 | # define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 6cae4f2028d2..e97e6f842699 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
68 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o | 68 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ |
69 | radeon_trace_points.o | ||
69 | 70 | ||
70 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 71 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
71 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 72 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
72 | radeon-$(CONFIG_ACPI) += radeon_acpi.o | 73 | radeon-$(CONFIG_ACPI) += radeon_acpi.o |
73 | 74 | ||
74 | obj-$(CONFIG_DRM_RADEON)+= radeon.o | 75 | obj-$(CONFIG_DRM_RADEON)+= radeon.o |
76 | |||
77 | CFLAGS_radeon_trace_points.o := -I$(src) \ No newline at end of file | ||
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h index c714179d1bfa..c61c3fe9fb98 100644 --- a/drivers/gpu/drm/radeon/ObjectID.h +++ b/drivers/gpu/drm/radeon/ObjectID.h | |||
@@ -37,6 +37,8 @@ | |||
37 | #define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 | 37 | #define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 |
38 | #define GRAPH_OBJECT_TYPE_ROUTER 0x4 | 38 | #define GRAPH_OBJECT_TYPE_ROUTER 0x4 |
39 | /* deleted */ | 39 | /* deleted */ |
40 | #define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6 | ||
41 | #define GRAPH_OBJECT_TYPE_GENERIC 0x7 | ||
40 | 42 | ||
41 | /****************************************************/ | 43 | /****************************************************/ |
42 | /* Encoder Object ID Definition */ | 44 | /* Encoder Object ID Definition */ |
@@ -64,6 +66,9 @@ | |||
64 | #define ENCODER_OBJECT_ID_VT1623 0x10 | 66 | #define ENCODER_OBJECT_ID_VT1623 0x10 |
65 | #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 | 67 | #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 |
66 | #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 | 68 | #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 |
69 | #define ENCODER_OBJECT_ID_ALMOND 0x22 | ||
70 | #define ENCODER_OBJECT_ID_TRAVIS 0x23 | ||
71 | #define ENCODER_OBJECT_ID_NUTMEG 0x22 | ||
67 | /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ | 72 | /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ |
68 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 | 73 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 |
69 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 | 74 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 |
@@ -108,6 +113,7 @@ | |||
108 | #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 | 113 | #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 |
109 | #define CONNECTOR_OBJECT_ID_eDP 0x14 | 114 | #define CONNECTOR_OBJECT_ID_eDP 0x14 |
110 | #define CONNECTOR_OBJECT_ID_MXM 0x15 | 115 | #define CONNECTOR_OBJECT_ID_MXM 0x15 |
116 | #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 | ||
111 | 117 | ||
112 | /* deleted */ | 118 | /* deleted */ |
113 | 119 | ||
@@ -124,6 +130,7 @@ | |||
124 | #define GENERIC_OBJECT_ID_GLSYNC 0x01 | 130 | #define GENERIC_OBJECT_ID_GLSYNC 0x01 |
125 | #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 | 131 | #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 |
126 | #define GENERIC_OBJECT_ID_MXM_OPM 0x03 | 132 | #define GENERIC_OBJECT_ID_MXM_OPM 0x03 |
133 | #define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin | ||
127 | 134 | ||
128 | /****************************************************/ | 135 | /****************************************************/ |
129 | /* Graphics Object ENUM ID Definition */ | 136 | /* Graphics Object ENUM ID Definition */ |
@@ -360,6 +367,26 @@ | |||
360 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 367 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
361 | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) | 368 | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) |
362 | 369 | ||
370 | #define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
371 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
372 | ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) | ||
373 | |||
374 | #define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
375 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
376 | ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) | ||
377 | |||
378 | #define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
379 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
380 | ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) | ||
381 | |||
382 | #define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
383 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
384 | ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) | ||
385 | |||
386 | #define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
387 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
388 | ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT) | ||
389 | |||
363 | /****************************************************/ | 390 | /****************************************************/ |
364 | /* Connector Object ID definition - Shared with BIOS */ | 391 | /* Connector Object ID definition - Shared with BIOS */ |
365 | /****************************************************/ | 392 | /****************************************************/ |
@@ -421,6 +448,14 @@ | |||
421 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 448 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
422 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | 449 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) |
423 | 450 | ||
451 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
452 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ | ||
453 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | ||
454 | |||
455 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
456 | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ | ||
457 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | ||
458 | |||
424 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 459 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
425 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 460 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
426 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) | 461 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) |
@@ -512,6 +547,7 @@ | |||
512 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 547 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
513 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 548 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
514 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) | 549 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) |
550 | |||
515 | #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 551 | #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
516 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 552 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
517 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) | 553 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) |
@@ -593,6 +629,14 @@ | |||
593 | GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ | 629 | GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ |
594 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC | 630 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC |
595 | 631 | ||
632 | #define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
633 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
634 | CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) | ||
635 | |||
636 | #define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
637 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
638 | CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) | ||
639 | |||
596 | /****************************************************/ | 640 | /****************************************************/ |
597 | /* Router Object ID definition - Shared with BIOS */ | 641 | /* Router Object ID definition - Shared with BIOS */ |
598 | /****************************************************/ | 642 | /****************************************************/ |
@@ -621,6 +665,10 @@ | |||
621 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 665 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
622 | GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) | 666 | GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) |
623 | 667 | ||
668 | #define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ | ||
669 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
670 | GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT) | ||
671 | |||
624 | /****************************************************/ | 672 | /****************************************************/ |
625 | /* Object Cap definition - Shared with BIOS */ | 673 | /* Object Cap definition - Shared with BIOS */ |
626 | /****************************************************/ | 674 | /****************************************************/ |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index fe359a239df3..58a0cd02c0a2 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -73,8 +73,18 @@ | |||
73 | #define ATOM_PPLL1 0 | 73 | #define ATOM_PPLL1 0 |
74 | #define ATOM_PPLL2 1 | 74 | #define ATOM_PPLL2 1 |
75 | #define ATOM_DCPLL 2 | 75 | #define ATOM_DCPLL 2 |
76 | #define ATOM_PPLL0 2 | ||
77 | #define ATOM_EXT_PLL1 8 | ||
78 | #define ATOM_EXT_PLL2 9 | ||
79 | #define ATOM_EXT_CLOCK 10 | ||
76 | #define ATOM_PPLL_INVALID 0xFF | 80 | #define ATOM_PPLL_INVALID 0xFF |
77 | 81 | ||
82 | #define ENCODER_REFCLK_SRC_P1PLL 0 | ||
83 | #define ENCODER_REFCLK_SRC_P2PLL 1 | ||
84 | #define ENCODER_REFCLK_SRC_DCPLL 2 | ||
85 | #define ENCODER_REFCLK_SRC_EXTCLK 3 | ||
86 | #define ENCODER_REFCLK_SRC_INVALID 0xFF | ||
87 | |||
78 | #define ATOM_SCALER1 0 | 88 | #define ATOM_SCALER1 0 |
79 | #define ATOM_SCALER2 1 | 89 | #define ATOM_SCALER2 1 |
80 | 90 | ||
@@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER | |||
192 | /*Image can't be updated, while Driver needs to carry the new table! */ | 202 | /*Image can't be updated, while Driver needs to carry the new table! */ |
193 | }ATOM_COMMON_TABLE_HEADER; | 203 | }ATOM_COMMON_TABLE_HEADER; |
194 | 204 | ||
205 | /****************************************************************************/ | ||
206 | // Structure stores the ROM header. | ||
207 | /****************************************************************************/ | ||
195 | typedef struct _ATOM_ROM_HEADER | 208 | typedef struct _ATOM_ROM_HEADER |
196 | { | 209 | { |
197 | ATOM_COMMON_TABLE_HEADER sHeader; | 210 | ATOM_COMMON_TABLE_HEADER sHeader; |
@@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER | |||
221 | #define USHORT void* | 234 | #define USHORT void* |
222 | #endif | 235 | #endif |
223 | 236 | ||
237 | /****************************************************************************/ | ||
238 | // Structures used in Command.mtb | ||
239 | /****************************************************************************/ | ||
224 | typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ | 240 | typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ |
225 | USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 | 241 | USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 |
226 | USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON | 242 | USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON |
@@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ | |||
312 | #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange | 328 | #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange |
313 | #define HPDInterruptService ReadHWAssistedI2CStatus | 329 | #define HPDInterruptService ReadHWAssistedI2CStatus |
314 | #define EnableVGA_Access GetSCLKOverMCLKRatio | 330 | #define EnableVGA_Access GetSCLKOverMCLKRatio |
331 | #define GetDispObjectInfo EnableYUV | ||
315 | 332 | ||
316 | typedef struct _ATOM_MASTER_COMMAND_TABLE | 333 | typedef struct _ATOM_MASTER_COMMAND_TABLE |
317 | { | 334 | { |
@@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER | |||
357 | /****************************************************************************/ | 374 | /****************************************************************************/ |
358 | #define COMPUTE_MEMORY_PLL_PARAM 1 | 375 | #define COMPUTE_MEMORY_PLL_PARAM 1 |
359 | #define COMPUTE_ENGINE_PLL_PARAM 2 | 376 | #define COMPUTE_ENGINE_PLL_PARAM 2 |
377 | #define ADJUST_MC_SETTING_PARAM 3 | ||
378 | |||
379 | /****************************************************************************/ | ||
380 | // Structures used by AdjustMemoryControllerTable | ||
381 | /****************************************************************************/ | ||
382 | typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ | ||
383 | { | ||
384 | #if ATOM_BIG_ENDIAN | ||
385 | ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block | ||
386 | ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] | ||
387 | ULONG ulClockFreq:24; | ||
388 | #else | ||
389 | ULONG ulClockFreq:24; | ||
390 | ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] | ||
391 | ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block | ||
392 | #endif | ||
393 | }ATOM_ADJUST_MEMORY_CLOCK_FREQ; | ||
394 | #define POINTER_RETURN_FLAG 0x80 | ||
360 | 395 | ||
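ATOM_ADJUST_MEMORY_CLOCK_FREQ packs a 24-bit clock value, a 7-bit memory-module number, and the pointer-return flag into one ULONG, which is why POINTER_RETURN_FLAG is the 0x80 byte mask for BYTE_3. A pack/unpack sketch using plain shifts instead of the bitfield; little-endian layout and the usual 10kHz clock units are assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock_10khz = 40000;	/* 400MHz, assuming 10kHz units */
	uint32_t module = 2, ret_ptr = 1;

	/* little-endian layout: clock in bits 23:0, module 30:24, flag 31 */
	uint32_t packed = (clock_10khz & 0xffffff) |
			  ((module & 0x7f) << 24) | (ret_ptr << 31);

	printf("packed=0x%08x byte3=0x%02x mode=%s\n",
	       packed, packed >> 24,
	       (packed >> 24) & 0x80 ? "return-pointer" : "program");
	return 0;
}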
361 | typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS | 396 | typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS |
362 | { | 397 | { |
@@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 | |||
440 | #endif | 475 | #endif |
441 | }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; | 476 | }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; |
442 | 477 | ||
478 | typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 | ||
479 | { | ||
480 | union | ||
481 | { | ||
482 | ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter | ||
483 | ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter | ||
484 | }; | ||
485 | UCHAR ucRefDiv; //Output Parameter | ||
486 | UCHAR ucPostDiv; //Output Parameter | ||
487 | union | ||
488 | { | ||
489 | UCHAR ucCntlFlag; //Output Flags | ||
490 | UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode | ||
491 | }; | ||
492 | UCHAR ucReserved; | ||
493 | }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5; | ||
494 | |||
495 | // ucInputFlag | ||
496 | #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode | ||
497 | |||
443 | typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER | 498 | typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER |
444 | { | 499 | { |
445 | ATOM_COMPUTE_CLOCK_FREQ ulClock; | 500 | ATOM_COMPUTE_CLOCK_FREQ ulClock; |
@@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS | |||
583 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 | 638 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 |
584 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 | 639 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 |
585 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 | 640 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 |
641 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02 | ||
586 | #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 | 642 | #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 |
587 | #define ATOM_ENCODER_CONFIG_LINKA 0x00 | 643 | #define ATOM_ENCODER_CONFIG_LINKA 0x00 |
588 | #define ATOM_ENCODER_CONFIG_LINKB 0x04 | 644 | #define ATOM_ENCODER_CONFIG_LINKB 0x04 |
@@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS | |||
608 | #define ATOM_ENCODER_MODE_TV 13 | 664 | #define ATOM_ENCODER_MODE_TV 13 |
609 | #define ATOM_ENCODER_MODE_CV 14 | 665 | #define ATOM_ENCODER_MODE_CV 14 |
610 | #define ATOM_ENCODER_MODE_CRT 15 | 666 | #define ATOM_ENCODER_MODE_CRT 15 |
667 | #define ATOM_ENCODER_MODE_DVO 16 | ||
668 | #define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2 | ||
669 | #define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2 | ||
611 | 670 | ||
612 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 | 671 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 |
613 | { | 672 | { |
@@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 | |||
661 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 | 720 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 |
662 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 | 721 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 |
663 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a | 722 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a |
723 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13 | ||
664 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b | 724 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b |
665 | #define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c | 725 | #define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c |
666 | #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d | 726 | #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d |
@@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 | |||
671 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 | 731 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 |
672 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 | 732 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 |
673 | 733 | ||
734 | //ucTableFormatRevision=1 | ||
735 | //ucTableContentRevision=3 | ||
674 | // The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver | 736 | // The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver |
675 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 | 737 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 |
676 | { | 738 | { |
677 | #if ATOM_BIG_ENDIAN | 739 | #if ATOM_BIG_ENDIAN |
678 | UCHAR ucReserved1:1; | 740 | UCHAR ucReserved1:1; |
679 | UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F | 741 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F) |
680 | UCHAR ucReserved:3; | 742 | UCHAR ucReserved:3; |
681 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz | 743 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz |
682 | #else | 744 | #else |
683 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz | 745 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz |
684 | UCHAR ucReserved:3; | 746 | UCHAR ucReserved:3; |
685 | UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F | 747 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F) |
686 | UCHAR ucReserved1:1; | 748 | UCHAR ucReserved1:1; |
687 | #endif | 749 | #endif |
688 | }ATOM_DIG_ENCODER_CONFIG_V3; | 750 | }ATOM_DIG_ENCODER_CONFIG_V3; |
689 | 751 | ||
752 | #define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 | ||
753 | #define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 | ||
754 | #define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 | ||
690 | #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 | 755 | #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 |
691 | 756 | #define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00 | |
757 | #define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10 | ||
758 | #define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20 | ||
759 | #define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30 | ||
760 | #define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40 | ||
761 | #define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50 | ||
692 | 762 | ||
693 | typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 | 763 | typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 |
694 | { | 764 | { |
@@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 | |||
707 | UCHAR ucReserved; | 777 | UCHAR ucReserved; |
708 | }DIG_ENCODER_CONTROL_PARAMETERS_V3; | 778 | }DIG_ENCODER_CONTROL_PARAMETERS_V3; |
709 | 779 | ||
780 | //ucTableFormatRevision=1 | ||
781 | //ucTableContentRevision=4 | ||
782 | // start from NI | ||
783 | // The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver | ||
784 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V4 | ||
785 | { | ||
786 | #if ATOM_BIG_ENDIAN | ||
787 | UCHAR ucReserved1:1; | ||
788 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F) | ||
789 | UCHAR ucReserved:2; | ||
790 | UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= Changed compared to previous version | ||
791 | #else | ||
792 | UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= Changed compared to previous version | ||
793 | UCHAR ucReserved:2; | ||
794 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F) | ||
795 | UCHAR ucReserved1:1; | ||
796 | #endif | ||
797 | }ATOM_DIG_ENCODER_CONFIG_V4; | ||
798 | |||
799 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03 | ||
800 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00 | ||
801 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01 | ||
802 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02 | ||
803 | #define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70 | ||
804 | #define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00 | ||
805 | #define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10 | ||
806 | #define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20 | ||
807 | #define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30 | ||
808 | #define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40 | ||
809 | #define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50 | ||
810 | |||
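Because ucConfig is a plain byte overlaying ATOM_DIG_ENCODER_CONFIG_V4, the masks above are enough to build or decode it without touching the bitfield struct. A small sketch (the defines are mirrored from the block above; the chosen values are illustrative):

#include <stdio.h>

typedef unsigned char UCHAR;

/* Mirrors of the V4 defines above */
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK    0x03
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL        0x70
#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER       0x20

int main(void)
{
    /* Select DIG2 and the new 5.4 GHz link-rate code in one config byte. */
    UCHAR ucConfig = ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER |
                     ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;

    UCHAR dig  = (ucConfig & ATOM_ENCODER_CONFIG_V4_ENCODER_SEL) >> 4;
    UCHAR rate =  ucConfig & ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK;
    printf("DIG%u, link-rate code %u\n", (unsigned)dig, (unsigned)rate); /* DIG2, 2 */
    return 0;
}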
811 | typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4 | ||
812 | { | ||
813 | USHORT usPixelClock; // in 10KHz; for BIOS convenience | ||
814 | union{ | ||
815 | ATOM_DIG_ENCODER_CONFIG_V4 acConfig; | ||
816 | UCHAR ucConfig; | ||
817 | }; | ||
818 | UCHAR ucAction; | ||
819 | UCHAR ucEncoderMode; | ||
820 | // =0: DP encoder | ||
821 | // =1: LVDS encoder | ||
822 | // =2: DVI encoder | ||
823 | // =3: HDMI encoder | ||
824 | // =4: SDVO encoder | ||
825 | // =5: DP audio | ||
826 | UCHAR ucLaneNum; // how many lanes to enable | ||
827 | UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP | ||
828 | UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HPD programming. New compared to previous version | ||
829 | }DIG_ENCODER_CONTROL_PARAMETERS_V4; | ||
710 | 830 | ||
711 | // define ucBitPerColor: | 831 | // define ucBitPerColor: |
712 | #define PANEL_BPC_UNDEFINE 0x00 | 832 | #define PANEL_BPC_UNDEFINE 0x00 |
@@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 | |||
893 | #endif | 1013 | #endif |
894 | }ATOM_DIG_TRANSMITTER_CONFIG_V3; | 1014 | }ATOM_DIG_TRANSMITTER_CONFIG_V3; |
895 | 1015 | ||
1016 | |||
896 | typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 | 1017 | typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 |
897 | { | 1018 | { |
898 | union | 1019 | union |
@@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 | |||
936 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD | 1057 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD |
937 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF | 1058 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF |
938 | 1059 | ||
1060 | |||
1061 | /****************************************************************************/ | ||
1062 | // Structures used by UNIPHYTransmitterControlTable V1.4 | ||
1063 | // ASIC Families: NI | ||
1064 | // ucTableFormatRevision=1 | ||
1065 | // ucTableContentRevision=4 | ||
1066 | /****************************************************************************/ | ||
1067 | typedef struct _ATOM_DP_VS_MODE_V4 | ||
1068 | { | ||
1069 | UCHAR ucLaneSel; | ||
1070 | union | ||
1071 | { | ||
1072 | UCHAR ucLaneSet; | ||
1073 | struct { | ||
1074 | #if ATOM_BIG_ENDIAN | ||
1075 | UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 | ||
1076 | UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level | ||
1077 | UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level | ||
1078 | #else | ||
1079 | UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level | ||
1080 | UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level | ||
1081 | UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 | ||
1082 | #endif | ||
1083 | }; | ||
1084 | }; | ||
1085 | }ATOM_DP_VS_MODE_V4; | ||
1086 | |||
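ucLaneSet overlays the swing/pre-emphasis/post-cursor2 bitfields, so a caller can assemble it with shifts instead of the struct. A sketch with illustrative training levels (the lane selection value is made up; layout shown is the little-endian branch):

#include <stdio.h>

typedef unsigned char UCHAR;

int main(void)
{
    /* Little-endian layout of ATOM_DP_VS_MODE_V4.ucLaneSet:
     * bits[2:0] voltage swing, bits[5:3] pre-emphasis, bits[7:6] post-cursor2. */
    UCHAR swing = 2, preemph = 1, pc2 = 0;   /* example training levels */
    UCHAR ucLaneSet = (UCHAR)((swing & 0x7) | ((preemph & 0x7) << 3) |
                              ((pc2 & 0x3) << 6));
    UCHAR ucLaneSel = 0;                     /* apply to lane 0 (illustrative) */

    printf("lane %u set=0x%02x\n", (unsigned)ucLaneSel, (unsigned)ucLaneSet);
    return 0;
}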
1087 | typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4 | ||
1088 | { | ||
1089 | #if ATOM_BIG_ENDIAN | ||
1090 | UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) | ||
1091 | // =1 Dig Transmitter 2 ( Uniphy CD ) | ||
1092 | // =2 Dig Transmitter 3 ( Uniphy EF ) | ||
1093 | UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New | ||
1094 | UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F | ||
1095 | UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E | ||
1096 | // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F | ||
1097 | UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) | ||
1098 | UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector | ||
1099 | #else | ||
1100 | UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector | ||
1101 | UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) | ||
1102 | UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E | ||
1103 | // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F | ||
1104 | UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F | ||
1105 | UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New | ||
1106 | UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) | ||
1107 | // =1 Dig Transmitter 2 ( Uniphy CD ) | ||
1108 | // =2 Dig Transmitter 3 ( Uniphy EF ) | ||
1109 | #endif | ||
1110 | }ATOM_DIG_TRANSMITTER_CONFIG_V4; | ||
1111 | |||
1112 | typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 | ||
1113 | { | ||
1114 | union | ||
1115 | { | ||
1116 | USHORT usPixelClock; // in 10KHz; for BIOS convenience | ||
1117 | USHORT usInitInfo; // when initializing uniphy, the lower 8 bits carry the connector type defined in objectid.h | ||
1118 | ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode. Redefined compared to previous version | ||
1119 | }; | ||
1120 | union | ||
1121 | { | ||
1122 | ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig; | ||
1123 | UCHAR ucConfig; | ||
1124 | }; | ||
1125 | UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX | ||
1126 | UCHAR ucLaneNum; | ||
1127 | UCHAR ucReserved[3]; | ||
1128 | }DIG_TRANSMITTER_CONTROL_PARAMETERS_V4; | ||
1129 | |||
1130 | //ucConfig | ||
1131 | //Bit0 | ||
1132 | #define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01 | ||
1133 | //Bit1 | ||
1134 | #define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02 | ||
1135 | //Bit2 | ||
1136 | #define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04 | ||
1137 | #define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00 | ||
1138 | #define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04 | ||
1139 | // Bit3 | ||
1140 | #define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08 | ||
1141 | #define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00 | ||
1142 | #define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08 | ||
1143 | // Bit5:4 | ||
1144 | #define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30 | ||
1145 | #define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00 | ||
1146 | #define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10 | ||
1147 | #define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4 | ||
1148 | #define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed compared to V3 | ||
1149 | // Bit7:6 | ||
1150 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0 | ||
1151 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB | ||
1152 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD | ||
1153 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF | ||
1154 | |||
1155 | |||
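The V4 transmitter ucConfig byte is composed the same way from the masks just listed; for instance a coherent link on Uniphy CD, link B, clocked from the new DCPLL reference. A sketch, with the defines mirrored from above and the particular combination chosen only for illustration:

#include <stdio.h>

typedef unsigned char UCHAR;

/* Mirrors of the V4 transmitter defines above */
#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT     0x02
#define ATOM_TRANSMITTER_CONFIG_V4_LINKB        0x04
#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL        0x20
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40

int main(void)
{
    /* Coherent HDMI on Uniphy CD, link B, DCPLL as reference clock. */
    UCHAR ucConfig = ATOM_TRANSMITTER_CONFIG_V4_COHERENT |
                     ATOM_TRANSMITTER_CONFIG_V4_LINKB |
                     ATOM_TRANSMITTER_CONFIG_V4_DCPLL |
                     ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2;
    printf("ucConfig=0x%02x\n", (unsigned)ucConfig);   /* 0x66 */
    return 0;
}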
1156 | /****************************************************************************/ | ||
1157 | // Structures used by ExternalEncoderControlTable V1.3 | ||
1158 | // ASIC Families: Evergreen, Llano, NI | ||
1159 | // ucTableFormatRevision=1 | ||
1160 | // ucTableContentRevision=3 | ||
1161 | /****************************************************************************/ | ||
1162 | |||
1163 | typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 | ||
1164 | { | ||
1165 | union{ | ||
1166 | USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT | ||
1167 | USHORT usConnectorId; // connector id, valid when ucAction = INIT | ||
1168 | }; | ||
1169 | UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT | ||
1170 | UCHAR ucAction; // | ||
1171 | UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT | ||
1172 | UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT | ||
1173 | UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP | ||
1174 | UCHAR ucReserved; | ||
1175 | }EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3; | ||
1176 | |||
1177 | // ucAction | ||
1178 | #define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00 | ||
1179 | #define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01 | ||
1180 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07 | ||
1181 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f | ||
1182 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 | ||
1183 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 | ||
1184 | #define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 | ||
1185 | |||
1186 | // ucConfig | ||
1187 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 | ||
1188 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 | ||
1189 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 | ||
1190 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02 | ||
1191 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70 | ||
1192 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00 | ||
1193 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10 | ||
1194 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20 | ||
1195 | |||
1196 | typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 | ||
1197 | { | ||
1198 | EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder; | ||
1199 | ULONG ulReserved[2]; | ||
1200 | }EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3; | ||
1201 | |||
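Putting the V3 external-encoder table together, the caller picks an action, a config byte (encoder select plus DP link rate) and a mode. A sketch under stated assumptions: the struct is my own flattened copy of EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 (union reduced to its first member), ATOM_ENCODER_MODE_DP is assumed to be 0 as defined earlier in this header, and the clock and lane values are illustrative:

#include <stdio.h>

typedef unsigned short USHORT;
typedef unsigned char  UCHAR;

/* Mirrors of the defines above */
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP      0x0f
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2           0x10
#define ATOM_ENCODER_MODE_DP                          0   /* assumed; defined earlier in this header */

/* Local sketch of EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 (union flattened). */
typedef struct {
    USHORT usPixelClock;   /* or usConnectorId when ucAction = INIT */
    UCHAR  ucConfig;
    UCHAR  ucAction;
    UCHAR  ucEncoderMode;
    UCHAR  ucLaneNum;
    UCHAR  ucBitPerColor;
    UCHAR  ucReserved;
} EXT_ENC_CTL_V3_SKETCH;

int main(void)
{
    EXT_ENC_CTL_V3_SKETCH args = {0};
    args.usPixelClock  = 16200;   /* 162 MHz in 10kHz units (example) */
    args.ucAction      = EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP;
    args.ucConfig      = EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 |
                         EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
    args.ucEncoderMode = ATOM_ENCODER_MODE_DP;
    args.ucLaneNum     = 4;
    printf("config=0x%02x lanes=%u\n", (unsigned)args.ucConfig,
           (unsigned)args.ucLaneNum);
    return 0;
}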
1202 | |||
939 | /****************************************************************************/ | 1203 | /****************************************************************************/ |
940 | // Structures used by DAC1OuputControlTable | 1204 | // Structures used by DAC1OuputControlTable |
941 | // DAC2OuputControlTable | 1205 | // DAC2OuputControlTable |
@@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2 | |||
1142 | #define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 | 1406 | #define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 |
1143 | #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 | 1407 | #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 |
1144 | 1408 | ||
1409 | |||
1145 | typedef struct _PIXEL_CLOCK_PARAMETERS_V3 | 1410 | typedef struct _PIXEL_CLOCK_PARAMETERS_V3 |
1146 | { | 1411 | { |
1147 | USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) | 1412 | USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) |
@@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5 | |||
1202 | #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 | 1467 | #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 |
1203 | #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 | 1468 | #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 |
1204 | 1469 | ||
1470 | typedef struct _CRTC_PIXEL_CLOCK_FREQ | ||
1471 | { | ||
1472 | #if ATOM_BIG_ENDIAN | ||
1473 | ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to | ||
1474 | // drive the pixel clock. not used for DCPLL case. | ||
1475 | ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. | ||
1476 | // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. | ||
1477 | #else | ||
1478 | ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. | ||
1479 | // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. | ||
1480 | ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to | ||
1481 | // drive the pixel clock. not used for DCPLL case. | ||
1482 | #endif | ||
1483 | }CRTC_PIXEL_CLOCK_FREQ; | ||
1484 | |||
1485 | typedef struct _PIXEL_CLOCK_PARAMETERS_V6 | ||
1486 | { | ||
1487 | union{ | ||
1488 | CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency | ||
1489 | ULONG ulDispEngClkFreq; // dispclk frequency | ||
1490 | }; | ||
1491 | USHORT usFbDiv; // feedback divider integer part. | ||
1492 | UCHAR ucPostDiv; // post divider. | ||
1493 | UCHAR ucRefDiv; // Reference divider | ||
1494 | UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL | ||
1495 | UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, | ||
1496 | // indicate which graphic encoder will be used. | ||
1497 | UCHAR ucEncoderMode; // Encoder mode: | ||
1498 | UCHAR ucMiscInfo; // bit[0]= Force program PPLL | ||
1499 | // bit[1]=1: VGA timing is used. | ||
1500 | // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp | ||
1501 | // bit[4]= RefClock source for PPLL. | ||
1502 | // =0: XTALIN ( default mode ) | ||
1503 | // =1: other external clock source, which is pre-defined | ||
1504 | // by VBIOS depending on the feature required. | ||
1505 | // bit[7:5]: reserved. | ||
1506 | ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) | ||
1507 | |||
1508 | }PIXEL_CLOCK_PARAMETERS_V6; | ||
1509 | |||
1510 | #define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01 | ||
1511 | #define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02 | ||
1512 | #define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c | ||
1513 | #define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00 | ||
1514 | #define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04 | ||
1515 | #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 | ||
1516 | #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c | ||
1517 | #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 | ||
1518 | |||
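The V6 block refines the divider formula quoted in the V3 comment above, PixelClock = (RefClk*FB_Div)/(Ref_Div*Post_Div), with a millionth-resolution fractional feedback divider (ulFbDivDecFrac, range 1~999999). A worked example; all divider numbers are made up, not from a real VBIOS:

#include <stdio.h>

int main(void)
{
    double refclk_10khz = 2700.0;   /* 27 MHz reference, in 10kHz units */
    unsigned fbdiv = 120, frac = 500000, refdiv = 4, postdiv = 3;

    /* Effective feedback divider = integer part + fraction in millionths. */
    double fb   = fbdiv + frac / 1000000.0;                 /* 120.5 */
    double pclk = refclk_10khz * fb / (refdiv * postdiv);   /* 27112.5 */
    printf("pixel clock = %.1f x 10kHz (= %.2f MHz)\n", pclk, pclk / 100.0);
    return 0;
}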
1205 | typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 | 1519 | typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 |
1206 | { | 1520 | { |
1207 | PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; | 1521 | PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; |
@@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS | |||
1241 | typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 | 1555 | typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 |
1242 | { | 1556 | { |
1243 | USHORT usPixelClock; // target pixel clock | 1557 | USHORT usPixelClock; // target pixel clock |
1244 | UCHAR ucTransmitterID; // transmitter id defined in objectid.h | 1558 | UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h |
1245 | UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI | 1559 | UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI |
1246 | UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX | 1560 | UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX |
1247 | UCHAR ucReserved[3]; | 1561 | UCHAR ucExtTransmitterID; // external encoder id. |
1562 | UCHAR ucReserved[2]; | ||
1248 | }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; | 1563 | }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; |
1249 | 1564 | ||
1250 | // usDispPllConfig v1.2 for RoadRunner | 1565 | // usDispPllConfig v1.2 for RoadRunner |
@@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS | |||
1358 | /**************************************************************************/ | 1673 | /**************************************************************************/ |
1359 | #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS | 1674 | #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS |
1360 | 1675 | ||
1676 | |||
1361 | /****************************************************************************/ | 1677 | /****************************************************************************/ |
1362 | // Structures used by PowerConnectorDetectionTable | 1678 | // Structures used by PowerConnectorDetectionTable |
1363 | /****************************************************************************/ | 1679 | /****************************************************************************/ |
@@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 | |||
1438 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 | 1754 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 |
1439 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 | 1755 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 |
1440 | 1756 | ||
1757 | // Used by DCE5.0 | ||
1758 | typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 | ||
1759 | { | ||
1760 | USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0 | ||
1761 | UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread. | ||
1762 | // Bit[1]: 1-Ext. 0-Int. | ||
1763 | // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL | ||
1764 | // Bits[7:4] reserved | ||
1765 | UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE | ||
1766 | USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] | ||
1767 | USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC | ||
1768 | }ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3; | ||
1769 | |||
1770 | #define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00 | ||
1771 | #define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01 | ||
1772 | #define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02 | ||
1773 | #define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c | ||
1774 | #define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00 | ||
1775 | #define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04 | ||
1776 | #define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08 | ||
1777 | #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF | ||
1778 | #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0 | ||
1779 | #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00 | ||
1780 | #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8 | ||
1781 | |||
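usSpreadSpectrumAmount carries two packed fields, SS_AMOUNT_FBDIV in bits [7:0] and SS_AMOUNT_NFRAC_SLIP in bits [11:8], and the masks and shifts above pull them apart. A sketch with an example register value:

#include <stdio.h>

typedef unsigned short USHORT;

/* Mirrors of the V3 defines above */
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK  0x00FF
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK  0x0F00
#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8

int main(void)
{
    USHORT usSpreadSpectrumAmount = 0x0342;   /* example value */
    unsigned fbdiv = (usSpreadSpectrumAmount & ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK)
                     >> ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT;
    unsigned nfrac = (usSpreadSpectrumAmount & ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)
                     >> ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT;
    printf("SS_AMOUNT_FBDIV=0x%02x SS_AMOUNT_NFRAC_SLIP=0x%x\n", fbdiv, nfrac);
    return 0;
}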
1441 | #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL | 1782 | #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL |
1442 | 1783 | ||
1443 | /**************************************************************************/ | 1784 | /**************************************************************************/ |
@@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES | |||
1706 | USHORT StandardVESA_Timing; // Only used by Bios | 2047 | USHORT StandardVESA_Timing; // Only used by Bios |
1707 | USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 | 2048 | USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 |
1708 | USHORT DAC_Info; // Will be obsolete from R600 | 2049 | USHORT DAC_Info; // Will be obsolete from R600 |
1709 | USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 | 2050 | USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info |
1710 | USHORT TMDS_Info; // Will be obsolete from R600 | 2051 | USHORT TMDS_Info; // Will be obsolete from R600 |
1711 | USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 | 2052 | USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 |
1712 | USHORT SupportedDevicesInfo; // Will be obsolete from R600 | 2053 | USHORT SupportedDevicesInfo; // Will be obsolete from R600 |
@@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES | |||
1736 | USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1 | 2077 | USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1 |
1737 | }ATOM_MASTER_LIST_OF_DATA_TABLES; | 2078 | }ATOM_MASTER_LIST_OF_DATA_TABLES; |
1738 | 2079 | ||
2080 | // For backward compatibility | ||
2081 | #define LVDS_Info LCD_Info | ||
2082 | |||
1739 | typedef struct _ATOM_MASTER_DATA_TABLE | 2083 | typedef struct _ATOM_MASTER_DATA_TABLE |
1740 | { | 2084 | { |
1741 | ATOM_COMMON_TABLE_HEADER sHeader; | 2085 | ATOM_COMMON_TABLE_HEADER sHeader; |
1742 | ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; | 2086 | ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; |
1743 | }ATOM_MASTER_DATA_TABLE; | 2087 | }ATOM_MASTER_DATA_TABLE; |
1744 | 2088 | ||
2089 | |||
1745 | /****************************************************************************/ | 2090 | /****************************************************************************/ |
1746 | // Structure used in MultimediaCapabilityInfoTable | 2091 | // Structure used in MultimediaCapabilityInfoTable |
1747 | /****************************************************************************/ | 2092 | /****************************************************************************/ |
@@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO | |||
1776 | UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) | 2121 | UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) |
1777 | }ATOM_MULTIMEDIA_CONFIG_INFO; | 2122 | }ATOM_MULTIMEDIA_CONFIG_INFO; |
1778 | 2123 | ||
2124 | |||
1779 | /****************************************************************************/ | 2125 | /****************************************************************************/ |
1780 | // Structures used in FirmwareInfoTable | 2126 | // Structures used in FirmwareInfoTable |
1781 | /****************************************************************************/ | 2127 | /****************************************************************************/ |
@@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1 | |||
2031 | UCHAR ucReserved4[3]; | 2377 | UCHAR ucReserved4[3]; |
2032 | }ATOM_FIRMWARE_INFO_V2_1; | 2378 | }ATOM_FIRMWARE_INFO_V2_1; |
2033 | 2379 | ||
2380 | //the structure below is to be used from NI onward | ||
2381 | //ucTableFormatRevision=2 | ||
2382 | //ucTableContentRevision=2 | ||
2383 | typedef struct _ATOM_FIRMWARE_INFO_V2_2 | ||
2384 | { | ||
2385 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
2386 | ULONG ulFirmwareRevision; | ||
2387 | ULONG ulDefaultEngineClock; //In 10Khz unit | ||
2388 | ULONG ulDefaultMemoryClock; //In 10Khz unit | ||
2389 | ULONG ulReserved[2]; | ||
2390 | ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit* | ||
2391 | ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit* | ||
2392 | ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit | ||
2393 | ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ? | ||
2394 | ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage. | ||
2395 | UCHAR ucReserved3; //Was ucASICMaxTemperature; | ||
2396 | UCHAR ucMinAllowedBL_Level; | ||
2397 | USHORT usBootUpVDDCVoltage; //In mV unit | ||
2398 | USHORT usLcdMinPixelClockPLL_Output; // In MHz unit | ||
2399 | USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit | ||
2400 | ULONG ulReserved4; //Was ulAsicMaximumVoltage | ||
2401 | ULONG ulMinPixelClockPLL_Output; //In 10Khz unit | ||
2402 | ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input | ||
2403 | ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input | ||
2404 | ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output | ||
2405 | USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC | ||
2406 | USHORT usMinPixelClockPLL_Input; //In 10Khz unit | ||
2407 | USHORT usMaxPixelClockPLL_Input; //In 10Khz unit | ||
2408 | USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; | ||
2409 | ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; | ||
2410 | USHORT usCoreReferenceClock; //In 10Khz unit | ||
2411 | USHORT usMemoryReferenceClock; //In 10Khz unit | ||
2412 | USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread clock | ||
2413 | UCHAR ucMemoryModule_ID; //Indicate what is the board design | ||
2414 | UCHAR ucReserved9[3]; | ||
2415 | USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; | ||
2416 | USHORT usReserved12; | ||
2417 | ULONG ulReserved10[3]; // Newly added compared to previous version | ||
2418 | }ATOM_FIRMWARE_INFO_V2_2; | ||
2034 | 2419 | ||
2035 | #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 | 2420 | #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2 |
2036 | 2421 | ||
2037 | /****************************************************************************/ | 2422 | /****************************************************************************/ |
2038 | // Structures used in IntegratedSystemInfoTable | 2423 | // Structures used in IntegratedSystemInfoTable |
@@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi | |||
2212 | ucDockingPinBit: which bit in this register to read the pin status; | 2597 | ucDockingPinBit: which bit in this register to read the pin status; |
2213 | ucDockingPinPolarity:Polarity of the pin when docked; | 2598 | ucDockingPinPolarity:Polarity of the pin when docked; |
2214 | 2599 | ||
2215 | ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 | 2600 | ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0 |
2216 | 2601 | ||
2217 | usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. | 2602 | usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. |
2218 | 2603 | ||
@@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep | |||
2250 | usMinDownStreamHTLinkWidth: same as above. | 2635 | usMinDownStreamHTLinkWidth: same as above. |
2251 | */ | 2636 | */ |
2252 | 2637 | ||
2638 | // ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition | ||
2639 | #define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0 | ||
2640 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1 | ||
2641 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2 | ||
2642 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3 | ||
2643 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4 | ||
2644 | |||
2645 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
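The CPU code lives in ulCPUCapInfo bits [7:0], per the description earlier in this hunk; a sketch of decoding it to a name. The lookup table is illustrative, built directly from the defines just listed:

#include <stdio.h>

typedef unsigned int ULONG;

#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4   /* mirrored from above */

static const char *cpu_names[] = { "Unknown", "Griffin", "Greyhound", "K8", "Pharaoh" };

int main(void)
{
    ULONG ulCPUCapInfo = 4;              /* example table value */
    ULONG code = ulCPUCapInfo & 0xFF;    /* CPU type is in bits [7:0] */
    if (code > INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE_LOCAL)
        code = 0;                        /* out of range -> unknown */
    printf("CPU: %s\n", cpu_names[code]);
    return 0;
}

/* Local alias so the sketch stays self-contained. */
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE_LOCAL INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH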
2253 | 2646 | ||
2254 | #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 | 2647 | #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 |
2255 | #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 | 2648 | #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 |
@@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12 | |||
2778 | #define PANEL_RANDOM_DITHER 0x80 | 3171 | #define PANEL_RANDOM_DITHER 0x80 |
2779 | #define PANEL_RANDOM_DITHER_MASK 0x80 | 3172 | #define PANEL_RANDOM_DITHER_MASK 0x80 |
2780 | 3173 | ||
3174 | #define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this | ||
3175 | |||
3176 | /****************************************************************************/ | ||
3177 | // Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12 | ||
3178 | // ASIC Families: NI | ||
3179 | // ucTableFormatRevision=1 | ||
3180 | // ucTableContentRevision=3 | ||
3181 | /****************************************************************************/ | ||
3182 | typedef struct _ATOM_LCD_INFO_V13 | ||
3183 | { | ||
3184 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
3185 | ATOM_DTD_FORMAT sLCDTiming; | ||
3186 | USHORT usExtInfoTableOffset; | ||
3187 | USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. | ||
3188 | ULONG ulReserved0; | ||
3189 | UCHAR ucLCD_Misc; // Reorganized in V13 | ||
3190 | // Bit0: {=0:single, =1:dual}, | ||
3191 | // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB}, | ||
3192 | // Bit3:2: {Grey level} | ||
3193 | // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) | ||
3194 | // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it? | ||
3195 | UCHAR ucPanelDefaultRefreshRate; | ||
3196 | UCHAR ucPanelIdentification; | ||
3197 | UCHAR ucSS_Id; | ||
3198 | USHORT usLCDVenderID; | ||
3199 | USHORT usLCDProductID; | ||
3200 | UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13 | ||
3201 | // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own | ||
3202 | // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED | ||
3203 | // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1) | ||
3204 | // Bit7-3: Reserved | ||
3205 | UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable | ||
3206 | USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13 | ||
3207 | |||
3208 | UCHAR ucPowerSequenceDIGONtoDE_in4Ms; | ||
3209 | UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms; | ||
3210 | UCHAR ucPowerSequenceDEtoDIGON_in4Ms; | ||
3211 | UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms; | ||
3212 | |||
3213 | UCHAR ucOffDelay_in4Ms; | ||
3214 | UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms; | ||
3215 | UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms; | ||
3216 | UCHAR ucReserved1; | ||
3217 | |||
3218 | ULONG ulReserved[4]; | ||
3219 | }ATOM_LCD_INFO_V13; | ||
3220 | |||
3221 | #define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13 | ||
3222 | |||
3223 | //Definitions for ucLCD_Misc | ||
3224 | #define ATOM_PANEL_MISC_V13_DUAL 0x00000001 | ||
3225 | #define ATOM_PANEL_MISC_V13_FPDI 0x00000002 | ||
3226 | #define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C | ||
3227 | #define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2 | ||
3228 | #define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70 | ||
3229 | #define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10 | ||
3230 | #define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20 | ||
3231 | |||
3232 | //Color Bit Depth definition in EDID V1.4 @BYTE 14h | ||
3233 | //Bit 6 5 4 | ||
3234 | // 0 0 0 - Color bit depth is undefined | ||
3235 | // 0 0 1 - 6 Bits per Primary Color | ||
3236 | // 0 1 0 - 8 Bits per Primary Color | ||
3237 | // 0 1 1 - 10 Bits per Primary Color | ||
3238 | // 1 0 0 - 12 Bits per Primary Color | ||
3239 | // 1 0 1 - 14 Bits per Primary Color | ||
3240 | // 1 1 0 - 16 Bits per Primary Color | ||
3241 | // 1 1 1 - Reserved | ||
3242 | |||
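Bits [6:4] of ucLCD_Misc follow the EDID 1.4 byte 14h encoding listed above, so a small table lookup recovers bits per primary color. A sketch; the sample value 0x20 corresponds to ATOM_PANEL_MISC_V13_8BIT_PER_COLOR:

#include <stdio.h>

typedef unsigned char UCHAR;

#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70   /* mirrored from above */

int main(void)
{
    /* Map code 0..7 to bits per primary color per the EDID 1.4 table above
     * (0 = undefined, 7 = reserved, marked -1 here). */
    static const int bpc[8] = { 0, 6, 8, 10, 12, 14, 16, -1 };

    UCHAR ucLCD_Misc = 0x20;   /* example: code 2 -> 8 bits per color */
    int code = (ucLCD_Misc & ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK) >> 4;
    printf("color depth code %d -> %d bpc\n", code, bpc[code]);
    return 0;
}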
3243 | //Definitions for ucLCDPanel_SpecialHandlingCap: | ||
3244 | |||
3245 | //Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. | ||
3246 | //Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL | ||
3247 | #define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID no change compared to previous version | ||
3248 | |||
3249 | //If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together | ||
3250 | //with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static | ||
3251 | //refresh rate switched by SW). This is only valid from ATOM_LVDS_INFO_V12 | ||
3252 | #define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED no change compared to previous version | ||
2781 | 3253 | ||
2782 | #define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 | 3254 | //Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP. |
2783 | 3255 | #define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change compared to previous version | ||
2783 | 3256 | ||
2784 | typedef struct _ATOM_PATCH_RECORD_MODE | 3257 | typedef struct _ATOM_PATCH_RECORD_MODE |
2785 | { | 3258 | { |
@@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO | |||
2944 | #define MAX_DTD_MODE_IN_VRAM 6 | 3417 | #define MAX_DTD_MODE_IN_VRAM 6 |
2945 | #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) | 3418 | #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) |
2946 | #define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) | 3419 | #define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) |
2947 | #define DFP_ENCODER_TYPE_OFFSET 0x80 | 3420 | //20 bytes for Encoder Type and DPCD in STD EDID area |
2948 | #define DP_ENCODER_LANE_NUM_OFFSET 0x84 | 3421 | #define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20) |
2949 | #define DP_ENCODER_LINK_RATE_OFFSET 0x88 | 3422 | #define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 ) |
2950 | 3423 | ||
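The rewritten offsets are now derived rather than hard-coded: the last 20 bytes of the STD EDID area hold the encoder type and the DPCD copy. Assuming ATOM_EDID_RAW_DATASIZE is 256, as defined elsewhere in this header, the arithmetic works out to 660 and 664:

#include <stdio.h>

int main(void)
{
    int edid = 256;        /* ATOM_EDID_RAW_DATASIZE, assumed from elsewhere in this header */
    int dtd  = 6 * 28;     /* ATOM_DTD_MODE_SUPPORT_TBL_SIZE = 168 */
    int std  = 32 * 8;     /* ATOM_STD_MODE_SUPPORT_TBL_SIZE = 256 */

    int dfp_encoder_type_offset = edid + dtd + std - 20;   /* last 20 bytes */
    int atom_dp_dpcd_offset     = dfp_encoder_type_offset + 4;
    printf("DFP_ENCODER_TYPE_OFFSET=%d ATOM_DP_DPCD_OFFSET=%d\n",
           dfp_encoder_type_offset, atom_dp_dpcd_offset);   /* 660 and 664 */
    return 0;
}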
2951 | #define ATOM_HWICON1_SURFACE_ADDR 0 | 3424 | #define ATOM_HWICON1_SURFACE_ADDR 0 |
2952 | #define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) | 3425 | #define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) |
@@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO | |||
2997 | #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) | 3470 | #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) |
2998 | #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) | 3471 | #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) |
2999 | 3472 | ||
3000 | #define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) | 3473 | #define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) |
3001 | 3474 | ||
3002 | #define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) | 3475 | #define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024) |
3003 | #define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 | 3476 | #define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512 |
3004 | 3477 | ||
3005 | //The size below is in KB! | 3478 | //The size below is in KB! |
3006 | #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) | 3479 | #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) |
3007 | 3480 | ||
3481 | #define ATOM_VRAM_RESERVE_V2_SIZE 32 | ||
3482 | |||
3008 | #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L | 3483 | #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L |
3009 | #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 | 3484 | #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 |
3010 | #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 | 3485 | #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 |
@@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH | |||
3206 | USHORT usGraphicObjIds[1]; //1st Encoder Obj sourced from GPU to last Graphic Obj destined to connector. | 3681 | USHORT usGraphicObjIds[1]; //1st Encoder Obj sourced from GPU to last Graphic Obj destined to connector. |
3207 | }ATOM_DISPLAY_OBJECT_PATH; | 3682 | }ATOM_DISPLAY_OBJECT_PATH; |
3208 | 3683 | ||
3684 | typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH | ||
3685 | { | ||
3686 | USHORT usDeviceTag; //supported device | ||
3687 | USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH | ||
3688 | USHORT usConnObjectId; //Connector Object ID | ||
3689 | USHORT usGPUObjectId; //GPU ID | ||
3690 | USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder | ||
3691 | }ATOM_DISPLAY_EXTERNAL_OBJECT_PATH; | ||
3692 | |||
3209 | typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE | 3693 | typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE |
3210 | { | 3694 | { |
3211 | UCHAR ucNumOfDispPath; | 3695 | UCHAR ucNumOfDispPath; |
@@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset | |||
3261 | #define EXT_AUXDDC_LUTINDEX_7 7 | 3745 | #define EXT_AUXDDC_LUTINDEX_7 7 |
3262 | #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) | 3746 | #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) |
3263 | 3747 | ||
3748 | //ucChannelMapping is defined as follows | ||
3749 | //for DP connector, eDP, DP to VGA/LVDS | ||
3750 | //Bit[1:0]: Define which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3751 | //Bit[3:2]: Define which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3752 | //Bit[5:4]: Define which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3753 | //Bit[7:6]: Define which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3754 | typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING | ||
3755 | { | ||
3756 | #if ATOM_BIG_ENDIAN | ||
3757 | UCHAR ucDP_Lane3_Source:2; | ||
3758 | UCHAR ucDP_Lane2_Source:2; | ||
3759 | UCHAR ucDP_Lane1_Source:2; | ||
3760 | UCHAR ucDP_Lane0_Source:2; | ||
3761 | #else | ||
3762 | UCHAR ucDP_Lane0_Source:2; | ||
3763 | UCHAR ucDP_Lane1_Source:2; | ||
3764 | UCHAR ucDP_Lane2_Source:2; | ||
3765 | UCHAR ucDP_Lane3_Source:2; | ||
3766 | #endif | ||
3767 | }ATOM_DP_CONN_CHANNEL_MAPPING; | ||
3768 | |||
3769 | //for DVI/HDMI, in the dual link case, both links must have the same mapping. | ||
3770 | //Bit[1:0]: Define which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3771 | //Bit[3:2]: Define which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3772 | //Bit[5:4]: Define which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3773 | //Bit[7:6]: Define which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3 | ||
3774 | typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING | ||
3775 | { | ||
3776 | #if ATOM_BIG_ENDIAN | ||
3777 | UCHAR ucDVI_CLK_Source:2; | ||
3778 | UCHAR ucDVI_DATA0_Source:2; | ||
3779 | UCHAR ucDVI_DATA1_Source:2; | ||
3780 | UCHAR ucDVI_DATA2_Source:2; | ||
3781 | #else | ||
3782 | UCHAR ucDVI_DATA2_Source:2; | ||
3783 | UCHAR ucDVI_DATA1_Source:2; | ||
3784 | UCHAR ucDVI_DATA0_Source:2; | ||
3785 | UCHAR ucDVI_CLK_Source:2; | ||
3786 | #endif | ||
3787 | }ATOM_DVI_CONN_CHANNEL_MAPPING; | ||
3788 | |||
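Because both mapping structs overlay the single ucChannelMapping byte in EXT_DISPLAY_PATH, a non-default routing is just a packed byte. A sketch that reverses the four DP lanes; the routing itself is made up, and the layout shown is the little-endian branch of ATOM_DP_CONN_CHANNEL_MAPPING:

#include <stdio.h>

typedef unsigned char UCHAR;

int main(void)
{
    /* Route DP_Lane0<-TX3, DP_Lane1<-TX2, DP_Lane2<-TX1, DP_Lane3<-TX0. */
    UCHAR lane0_src = 3, lane1_src = 2, lane2_src = 1, lane3_src = 0;
    UCHAR ucChannelMapping = (UCHAR)((lane0_src << 0) | (lane1_src << 2) |
                                     (lane2_src << 4) | (lane3_src << 6));
    printf("ucChannelMapping=0x%02x\n", (unsigned)ucChannelMapping);  /* 0x1b */
    return 0;
}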
3264 | typedef struct _EXT_DISPLAY_PATH | 3789 | typedef struct _EXT_DISPLAY_PATH |
3265 | { | 3790 | { |
3266 | USHORT usDeviceTag; //A bit vector to show what devices are supported | 3791 | USHORT usDeviceTag; //A bit vector to show what devices are supported |
@@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH | |||
3269 | UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT | 3794 | UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT |
3270 | UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT | 3795 | UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT |
3271 | USHORT usExtEncoderObjId; //external encoder object id | 3796 | USHORT usExtEncoderObjId; //external encoder object id |
3272 | USHORT usReserved[3]; | 3797 | union{ |
3798 | UCHAR ucChannelMapping; // if ucChannelMapping=0, use the default one-to-one mapping | ||
3799 | ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping; | ||
3800 | ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping; | ||
3801 | }; | ||
3802 | UCHAR ucReserved; | ||
3803 | USHORT usReserved[2]; | ||
3273 | }EXT_DISPLAY_PATH; | 3804 | }EXT_DISPLAY_PATH; |
3274 | 3805 | ||
3275 | #define NUMBER_OF_UCHAR_FOR_GUID 16 | 3806 | #define NUMBER_OF_UCHAR_FOR_GUID 16 |
@@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO | |||
3281 | UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string | 3812 | UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string |
3282 | EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. | 3813 | EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. |
3283 | UCHAR ucChecksum; // a simple checksum; the byte sum of the whole structure equals 0x0. | 3814 | UCHAR ucChecksum; // a simple checksum; the byte sum of the whole structure equals 0x0. |
3284 | UCHAR Reserved [7]; // for potential expansion | 3815 | UCHAR uc3DStereoPinId; // used for eDP panel |
3816 | UCHAR Reserved [6]; // for potential expansion | ||
3285 | }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; | 3817 | }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; |
3286 | 3818 | ||
3287 | //Related definitions, all records are different but they have a common header | 3819 | //Related definitions, all records are different but they have a common header |
@@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER | |||
3311 | #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table | 3843 | #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table |
3312 | #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record | 3844 | #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record |
3313 | #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 | 3845 | #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 |
3846 | #define ATOM_ENCODER_CAP_RECORD_TYPE 20 | ||
3314 | 3847 | ||
3315 | 3848 | ||
3316 | //Must be updated when new record type is added,equal to that record definition! | 3849 | //Must be updated when new record type is added,equal to that record definition! |
3317 | #define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE | 3850 | #define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE |
3318 | 3851 | ||
3319 | typedef struct _ATOM_I2C_RECORD | 3852 | typedef struct _ATOM_I2C_RECORD |
3320 | { | 3853 | { |
@@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD | |||
3441 | UCHAR ucPadding[2]; | 3974 | UCHAR ucPadding[2]; |
3442 | }ATOM_ENCODER_DVO_CF_RECORD; | 3975 | }ATOM_ENCODER_DVO_CF_RECORD; |
3443 | 3976 | ||
3977 | // Bit maps for ATOM_ENCODER_CAP_RECORD.usEncoderCap | ||
3978 | #define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path | ||
3979 | |||
3980 | typedef struct _ATOM_ENCODER_CAP_RECORD | ||
3981 | { | ||
3982 | ATOM_COMMON_RECORD_HEADER sheader; | ||
3983 | union { | ||
3984 | USHORT usEncoderCap; | ||
3985 | struct { | ||
3986 | #if ATOM_BIG_ENDIAN | ||
3987 | USHORT usReserved:15; // Bit1-15 may be defined for other capability in future | ||
3988 | USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. | ||
3989 | #else | ||
3990 | USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. | ||
3991 | USHORT usReserved:15; // Bit1-15 may be defined for other capability in future | ||
3992 | #endif | ||
3993 | }; | ||
3994 | }; | ||
3995 | }ATOM_ENCODER_CAP_RECORD; | ||
3996 | |||
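A driver probing whether a path may run DP1.2 HBR2 only needs bit 0 of the record's capability word. A sketch against the bit map above; the record value is illustrative:

#include <stdio.h>

typedef unsigned short USHORT;

#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01   /* mirrored from above */

int main(void)
{
    USHORT usEncoderCap = 0x0001;   /* example value from an ATOM_ENCODER_CAP_RECORD */
    if (usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2)
        printf("path supports DP1.2 HBR2\n");
    return 0;
}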
3444 | // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle | 3997 | // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle |
3445 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 | 3998 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 |
3446 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 | 3999 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 |
@@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL | |||
3580 | #define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI | 4133 | #define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI |
3581 | #define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage | 4134 | #define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage |
3582 | #define VOLTAGE_CONTROL_ID_DS4402 0x04 | 4135 | #define VOLTAGE_CONTROL_ID_DS4402 0x04 |
4136 | #define VOLTAGE_CONTROL_ID_UP6266 0x05 | ||
4137 | #define VOLTAGE_CONTROL_ID_SCORPIO 0x06 | ||
4138 | #define VOLTAGE_CONTROL_ID_VT1556M 0x07 | ||
4139 | #define VOLTAGE_CONTROL_ID_CHL822x 0x08 | ||
4140 | #define VOLTAGE_CONTROL_ID_VT1586M 0x09 | ||
3583 | 4141 | ||
3584 | typedef struct _ATOM_VOLTAGE_OBJECT | 4142 | typedef struct _ATOM_VOLTAGE_OBJECT |
3585 | { | 4143 | { |
@@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO | |||
3670 | #define POWER_SENSOR_GPIO 0x01 | 4228 | #define POWER_SENSOR_GPIO 0x01 |
3671 | #define POWER_SENSOR_I2C 0x02 | 4229 | #define POWER_SENSOR_I2C 0x02 |
3672 | 4230 | ||
4231 | typedef struct _ATOM_CLK_VOLT_CAPABILITY | ||
4232 | { | ||
4233 | ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table | ||
4234 | ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz | ||
4235 | }ATOM_CLK_VOLT_CAPABILITY; | ||
4236 | |||
4237 | typedef struct _ATOM_AVAILABLE_SCLK_LIST | ||
4238 | { | ||
4239 | ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz | ||
4240 | USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK | ||
4241 | USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK | ||
4242 | }ATOM_AVAILABLE_SCLK_LIST; | ||
4243 | |||
4244 | // ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition | ||
4245 | #define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0] | ||
4246 | |||
4247 | // this IntegratedSystemInfoTable is used for the Llano/Ontario APUs | ||
3673 | typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 | 4248 | typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 |
3674 | { | 4249 | { |
3675 | ATOM_COMMON_TABLE_HEADER sHeader; | 4250 | ATOM_COMMON_TABLE_HEADER sHeader; |
3676 | ULONG ulBootUpEngineClock; | 4251 | ULONG ulBootUpEngineClock; |
3677 | ULONG ulDentistVCOFreq; | 4252 | ULONG ulDentistVCOFreq; |
3678 | ULONG ulBootUpUMAClock; | 4253 | ULONG ulBootUpUMAClock; |
3679 | ULONG ulReserved1[8]; | 4254 | ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; |
3680 | ULONG ulBootUpReqDisplayVector; | 4255 | ULONG ulBootUpReqDisplayVector; |
3681 | ULONG ulOtherDisplayMisc; | 4256 | ULONG ulOtherDisplayMisc; |
3682 | ULONG ulGPUCapInfo; | 4257 | ULONG ulGPUCapInfo; |
3683 | ULONG ulReserved2[3]; | 4258 | ULONG ulSB_MMIO_Base_Addr; |
4259 | USHORT usRequestedPWMFreqInHz; | ||
4260 | UCHAR ucHtcTmpLmt; | ||
4261 | UCHAR ucHtcHystLmt; | ||
4262 | ULONG ulMinEngineClock; | ||
3684 | ULONG ulSystemConfig; | 4263 | ULONG ulSystemConfig; |
3685 | ULONG ulCPUCapInfo; | 4264 | ULONG ulCPUCapInfo; |
3686 | USHORT usMaxNBVoltage; | 4265 | USHORT usNBP0Voltage; |
3687 | USHORT usMinNBVoltage; | 4266 | USHORT usNBP1Voltage; |
3688 | USHORT usBootUpNBVoltage; | 4267 | USHORT usBootUpNBVoltage; |
3689 | USHORT usExtDispConnInfoOffset; | 4268 | USHORT usExtDispConnInfoOffset; |
3690 | UCHAR ucHtcTmpLmt; | 4269 | USHORT usPanelRefreshRateRange; |
3691 | UCHAR ucTjOffset; | ||
3692 | UCHAR ucMemoryType; | 4270 | UCHAR ucMemoryType; |
3693 | UCHAR ucUMAChannelNumber; | 4271 | UCHAR ucUMAChannelNumber; |
3694 | ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; | 4272 | ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; |
3695 | ULONG ulCSR_M3_ARB_CNTL_UVD[10]; | 4273 | ULONG ulCSR_M3_ARB_CNTL_UVD[10]; |
3696 | ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; | 4274 | ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; |
3697 | ULONG ulReserved3[42]; | 4275 | ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; |
4276 | ULONG ulGMCRestoreResetTime; | ||
4277 | ULONG ulMinimumNClk; | ||
4278 | ULONG ulIdleNClk; | ||
4279 | ULONG ulDDR_DLL_PowerUpTime; | ||
4280 | ULONG ulDDR_PLL_PowerUpTime; | ||
4281 | USHORT usPCIEClkSSPercentage; | ||
4282 | USHORT usPCIEClkSSType; | ||
4283 | USHORT usLvdsSSPercentage; | ||
4284 | USHORT usLvdsSSpreadRateIn10Hz; | ||
4285 | USHORT usHDMISSPercentage; | ||
4286 | USHORT usHDMISSpreadRateIn10Hz; | ||
4287 | USHORT usDVISSPercentage; | ||
4288 | USHORT usDVISSpreadRateIn10Hz; | ||
4289 | ULONG ulReserved3[21]; | ||
3698 | ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; | 4290 | ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; |
3699 | }ATOM_INTEGRATED_SYSTEM_INFO_V6; | 4291 | }ATOM_INTEGRATED_SYSTEM_INFO_V6; |
3700 | 4292 | ||
4293 | // ulGPUCapInfo | ||
4294 | #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 | ||
4295 | #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08 | ||
4296 | |||
4297 | // ulOtherDisplayMisc | ||
4298 | #define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01 | ||
4299 | |||
4300 | |||
3701 | /********************************************************************************************************************** | 4301 | /********************************************************************************************************************** |
3702 | // ATOM_INTEGRATED_SYSTEM_INFO_V6 Description | 4302 | ATOM_INTEGRATED_SYSTEM_INFO_V6 Description |
3703 | //ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. | 4303 | ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, the VBIOS uses a pre-defined bootup engine clock |
3704 | //ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. | 4304 | ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. |
3705 | //ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. | 4305 | ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. |
3706 | //ulReserved1[8] Reserved by now, must be 0x0. | 4306 | sDISPCLK_Voltage: Report Display clock voltage requirement. |
3707 | //ulBootUpReqDisplayVector VBIOS boot up display IDs | 4307 | |
3708 | // ATOM_DEVICE_CRT1_SUPPORT 0x0001 | 4308 | ulBootUpReqDisplayVector: VBIOS boot up display IDs; the following devices are supported in Llano/Ontario projects: |
3709 | // ATOM_DEVICE_CRT2_SUPPORT 0x0010 | 4309 | ATOM_DEVICE_CRT1_SUPPORT 0x0001 |
3710 | // ATOM_DEVICE_DFP1_SUPPORT 0x0008 | 4310 | ATOM_DEVICE_CRT2_SUPPORT 0x0010 |
3711 | // ATOM_DEVICE_DFP6_SUPPORT 0x0040 | 4311 | ATOM_DEVICE_DFP1_SUPPORT 0x0008 |
3712 | // ATOM_DEVICE_DFP2_SUPPORT 0x0080 | 4312 | ATOM_DEVICE_DFP6_SUPPORT 0x0040 |
3713 | // ATOM_DEVICE_DFP3_SUPPORT 0x0200 | 4313 | ATOM_DEVICE_DFP2_SUPPORT 0x0080 |
3714 | // ATOM_DEVICE_DFP4_SUPPORT 0x0400 | 4314 | ATOM_DEVICE_DFP3_SUPPORT 0x0200 |
3715 | // ATOM_DEVICE_DFP5_SUPPORT 0x0800 | 4315 | ATOM_DEVICE_DFP4_SUPPORT 0x0400 |
3716 | // ATOM_DEVICE_LCD1_SUPPORT 0x0002 | 4316 | ATOM_DEVICE_DFP5_SUPPORT 0x0800 |
3717 | //ulOtherDisplayMisc Other display related flags, not defined yet. | 4317 | ATOM_DEVICE_LCD1_SUPPORT 0x0002 |
3718 | //ulGPUCapInfo TBD | 4318 | ulOtherDisplayMisc: Other display related flags, not defined yet. |
3719 | //ulReserved2[3] must be 0x0 for the reserved. | 4319 | ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
3720 | //ulSystemConfig TBD | 4320 | =1: TMDS/HDMI Coherent Mode uses single PLL mode.
3721 | //ulCPUCapInfo TBD | 4321 | bit[3]=0: Enable HW AUX mode detection logic
3722 | //usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. | 4322 | =1: Disable HW AUX mode detection logic
3723 | //usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. | 4323 | ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage. |
3724 | //usBootUpNBVoltage Boot up NB voltage in unit of mv. | 4324 | |
3725 | //ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register. | 4325 | usRequestedPWMFreqInHz: When it is set to 0x0 by SBIOS, the LCD BackLight is not controlled by GPU (SW).
3726 | //ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed. | 4326 | Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0; |
3727 | //ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. | 4327 | |
3728 | //ucUMAChannelNumber System memory channel numbers. | 4328 | When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below: |
3729 | //usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. | 4329 | 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
3730 | //ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default | 4330 | VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
3731 | //ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback. | 4331 | Changing BL using VBIOS function is functional in both driver and non-driver present environment; |
3732 | //ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications. | 4332 | and enabling VariBri under the driver environment from PP table is optional. |
4333 | |||
4334 | 2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating | ||
4335 | that BL control from GPU is expected. | ||
4336 | VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1 | ||
4337 | Changing BL using VBIOS function could be functional in both driver and non-driver present environments, but | ||
4338 | this is platform dependent, | ||
4339 | and enabling VariBri under the driver environment from PP table is optional. | ||
4340 | |||
4341 | ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. | ||
4342 | Threshold on value to enter HTC_active state. | ||
4343 | ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt. | ||
4344 | The threshold-off value to exit the HTC_active state is the threshold-on value minus ucHtcHystLmt (see the sketch after this description block). | ||
4345 | ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings. | ||
4346 | ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled | ||
4347 | =1: PCIE Power Gating Enabled | ||
4348 | Bit[1]=0: DDR-DLL shut-down feature disabled. | ||
4349 | =1: DDR-DLL shut-down feature enabled. | ||
4350 | Bit[2]=0: DDR-PLL Power down feature disabled. | ||
4351 | =1: DDR-PLL Power down feature enabled. | ||
4352 | ulCPUCapInfo: TBD | ||
4353 | usNBP0Voltage: VID for voltage on NB P0 State | ||
4354 | usNBP1Voltage: VID for voltage on NB P1 State | ||
4355 | usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement. | ||
4356 | usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure | ||
4357 | usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set | ||
4358 | to indicate a range. | ||
4359 | SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 | ||
4360 | SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 | ||
4361 | SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 | ||
4362 | SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 | ||
4363 | ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. | ||
4364 | ucUMAChannelNumber: System memory channel numbers. | ||
4365 | ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default | ||
4366 | ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback. | ||
4367 | ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications. | ||
4368 | sAvail_SCLK[5]: Array providing the available list of SCLK and corresponding voltage, ordered from low to high | ||
4369 | ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. | ||
4370 | ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz. | ||
4371 | ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz. | ||
4372 | ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns. | ||
4373 | ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns. | ||
4374 | usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%. | ||
4375 | usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread. | ||
4376 | usLvdsSSPercentage: LVDS panel (does not include eDP) Spread Spectrum Percentage in units of 0.01%; =0 means use the VBIOS default setting. | ||
4377 | usLvdsSSpreadRateIn10Hz: LVDS panel (does not include eDP) Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting. | ||
4378 | usHDMISSPercentage: HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0 means use the VBIOS default setting. | ||
4379 | usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting. | ||
4380 | usDVISSPercentage: DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0 means use the VBIOS default setting. | ||
4381 | usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting. | ||
3733 | **********************************************************************************************************************/ | 4382 | **********************************************************************************************************************/ |
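The HTC threshold and ulSystemConfig descriptions above reduce to a few lines of C. The following is a minimal sketch, not driver code: the helper name and the SYS_CFG_* masks are illustrative assumptions drawn only from the bit assignments quoted in the description.

/* Illustrative only: decode a few ATOM_INTEGRATED_SYSTEM_INFO_V6 fields
 * per the description above. The SYS_CFG_* names are hypothetical. */
#define SYS_CFG_PCIE_POWER_GATING_EN (1 << 0)
#define SYS_CFG_DDR_DLL_SHUTDOWN_EN  (1 << 1)
#define SYS_CFG_DDR_PLL_POWERDOWN_EN (1 << 2)

static void decode_sysinfo_v6(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
{
	/* HTC exit threshold is the entry threshold minus the hysteresis. */
	unsigned int htc_on  = info->ucHtcTmpLmt;
	unsigned int htc_off = info->ucHtcTmpLmt - info->ucHtcHystLmt;
	int pcie_pg = !!(info->ulSystemConfig & SYS_CFG_PCIE_POWER_GATING_EN);
	int ddr_dll = !!(info->ulSystemConfig & SYS_CFG_DDR_DLL_SHUTDOWN_EN);
	int ddr_pll = !!(info->ulSystemConfig & SYS_CFG_DDR_PLL_POWERDOWN_EN);

	(void)htc_on; (void)htc_off; (void)pcie_pg; (void)ddr_dll; (void)ddr_pll;
}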
3734 | 4383 | ||
3735 | /**************************************************************************/ | 4384 | /**************************************************************************/ |
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT | |||
3790 | #define ASIC_INTERNAL_SS_ON_LVDS 6 | 4439 | #define ASIC_INTERNAL_SS_ON_LVDS 6 |
3791 | #define ASIC_INTERNAL_SS_ON_DP 7 | 4440 | #define ASIC_INTERNAL_SS_ON_DP 7 |
3792 | #define ASIC_INTERNAL_SS_ON_DCPLL 8 | 4441 | #define ASIC_INTERNAL_SS_ON_DCPLL 8 |
4442 | #define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9 | ||
3793 | 4443 | ||
3794 | typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 | 4444 | typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 |
3795 | { | 4445 | { |
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 | |||
3903 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 | 4553 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 |
3904 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 | 4554 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 |
3905 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 | 4555 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 |
4556 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4 | ||
3906 | 4557 | ||
3907 | //Byte aligned definition for BIOS usage | 4558 | //Byte aligned definition for BIOS usage
3908 | #define ATOM_S0_CRT1_MONOb0 0x01 | 4559 | #define ATOM_S0_CRT1_MONOb0 0x01 |
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{ | |||
4529 | #define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) | 5180 | #define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) |
4530 | #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) | 5181 | #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) |
4531 | #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) | 5182 | #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) |
4532 | 5183 | //#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code | |
5184 | #define ACCESS_PLACEHOLDER 0x80 | ||
4533 | 5185 | ||
4534 | typedef struct _ATOM_MC_INIT_PARAM_TABLE | 5186 | typedef struct _ATOM_MC_INIT_PARAM_TABLE |
4535 | { | 5187 | { |
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE | |||
4554 | #define _32Mx32 0x33 | 5206 | #define _32Mx32 0x33 |
4555 | #define _64Mx8 0x41 | 5207 | #define _64Mx8 0x41 |
4556 | #define _64Mx16 0x42 | 5208 | #define _64Mx16 0x42 |
5209 | #define _64Mx32 0x43 | ||
5210 | #define _128Mx8 0x51 | ||
5211 | #define _128Mx16 0x52 | ||
5212 | #define _256Mx8 0x61 | ||
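The density codes added here follow a visible pattern: the upper nibble encodes the depth and the lower nibble the width, each as 4 shifted left by the nibble (e.g. _64Mx16 == 0x42: 4<<4 = 64M deep, 4<<2 = 16 bits wide). That pattern is an inference from the listed values, not something the header states, so the sketch below is hypothetical:

/* Inferred decoding of the density codes above; not stated in the header. */
static void decode_density(unsigned char code,
			   unsigned int *depth_m, unsigned int *width_bits)
{
	*depth_m    = 4u << (code >> 4);   /* e.g. 0x4_ -> 64M cells deep */
	*width_bits = 4u << (code & 0xf);  /* e.g. 0x_2 -> x16 wide */
}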
4557 | 5213 | ||
4558 | #define SAMSUNG 0x1 | 5214 | #define SAMSUNG 0x1 |
4559 | #define INFINEON 0x2 | 5215 | #define INFINEON 0x2 |
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE | |||
4569 | #define QIMONDA INFINEON | 5225 | #define QIMONDA INFINEON |
4570 | #define PROMOS MOSEL | 5226 | #define PROMOS MOSEL |
4571 | #define KRETON INFINEON | 5227 | #define KRETON INFINEON |
5228 | #define ELIXIR NANYA | ||
4572 | 5229 | ||
4573 | /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// | 5230 | /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// |
4574 | 5231 | ||
4575 | #define UCODE_ROM_START_ADDRESS 0x1c000 | 5232 | #define UCODE_ROM_START_ADDRESS 0x1b800 |
4576 | #define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode | 5233 | #define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode |
4577 | 5234 | ||
4578 | //uCode block header for reference | 5235 | //uCode block header for reference |
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6 | |||
4903 | ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock | 5560 | ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock |
4904 | }ATOM_VRAM_MODULE_V6; | 5561 | }ATOM_VRAM_MODULE_V6; |
4905 | 5562 | ||
4906 | 5563 | typedef struct _ATOM_VRAM_MODULE_V7 | |
5564 | { | ||
5565 | // Design Specific Values | ||
5566 | ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP | ||
5567 | USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7 | ||
5568 | USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) | ||
5569 | USHORT usReserved; | ||
5570 | UCHAR ucExtMemoryID; // Current memory module ID | ||
5571 | UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5 | ||
5572 | UCHAR ucChannelNum; // Number of mem. channels supported in this module | ||
5573 | UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT | ||
5574 | UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 | ||
5575 | UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now. | ||
5576 | UCHAR ucMisc; // RANK_OF_THISMEMORY etc. | ||
5577 | UCHAR ucVREFI; // Not used. | ||
5578 | UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2. | ||
5579 | UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble | ||
5580 | UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros | ||
5581 | UCHAR ucReserved[3]; | ||
5582 | // Memory Module specific values | ||
5583 | USHORT usEMRS2Value; // EMRS2/MR2 Value. | ||
5584 | USHORT usEMRS3Value; // EMRS3/MR3 Value. | ||
5585 | UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code | ||
5586 | UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) | ||
5587 | UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory | ||
5588 | UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth | ||
5589 | char strMemPNString[20]; // part number, ends with '0'. | ||
5590 | }ATOM_VRAM_MODULE_V7; | ||
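ucMemoryVenderID and ucRefreshRateFactor each pack two values, per the field comments above. A minimal decoding sketch (the helper name is illustrative, and the 8 << factor mapping simply restates the 8/16/32/64 ms table from the comment):

static void vram_module_v7_decode(const ATOM_VRAM_MODULE_V7 *mod)
{
	unsigned int vendor = mod->ucMemoryVenderID & 0x0f;        /* [3:0] vendor code */
	unsigned int rev    = (mod->ucMemoryVenderID >> 4) & 0x0f; /* [7:4] revision */
	/* [1:0]: 00=8ms, 01=16ms, 10=32ms, 11=64ms -> 8 << factor */
	unsigned int refresh_ms = 8u << (mod->ucRefreshRateFactor & 0x3);

	(void)vendor; (void)rev; (void)refresh_ms;
}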
4907 | 5591 | ||
4908 | typedef struct _ATOM_VRAM_INFO_V2 | 5592 | typedef struct _ATOM_VRAM_INFO_V2 |
4909 | { | 5593 | { |
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4 | |||
4942 | // ATOM_INIT_REG_BLOCK aMemAdjust; | 5626 | // ATOM_INIT_REG_BLOCK aMemAdjust; |
4943 | }ATOM_VRAM_INFO_V4; | 5627 | }ATOM_VRAM_INFO_V4; |
4944 | 5628 | ||
5629 | typedef struct _ATOM_VRAM_INFO_HEADER_V2_1 | ||
5630 | { | ||
5631 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
5632 | USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting | ||
5633 | USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting | ||
5634 | USHORT usReserved[4]; | ||
5635 | UCHAR ucNumOfVRAMModule; // indicate number of VRAM module | ||
5636 | UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list | ||
5637 | UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version | ||
5638 | UCHAR ucReserved; | ||
5639 | ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; | ||
5640 | }ATOM_VRAM_INFO_HEADER_V2_1; | ||
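Since aVramInfo is sized for the maximum only and each V7 module carries its own usModuleSize, a reader has to walk the array by byte offset rather than C array indexing. A sketch, assuming the modules are packed back to back (and ignoring the little-endian byte swap a portable driver would add):

static void walk_vram_modules(const ATOM_VRAM_INFO_HEADER_V2_1 *hdr)
{
	const unsigned char *p = (const unsigned char *)&hdr->aVramInfo[0];
	unsigned int i;

	for (i = 0; i < hdr->ucNumOfVRAMModule; i++) {
		const ATOM_VRAM_MODULE_V7 *mod = (const ATOM_VRAM_MODULE_V7 *)p;
		/* ... inspect mod ... */
		p += mod->usModuleSize; /* modules can differ in size */
	}
}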
5641 | |||
5642 | |||
4945 | typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO | 5643 | typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO |
4946 | { | 5644 | { |
4947 | ATOM_COMMON_TABLE_HEADER sHeader; | 5645 | ATOM_COMMON_TABLE_HEADER sHeader; |
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO | |||
5182 | UCHAR ucReserved; | 5880 | UCHAR ucReserved; |
5183 | }ASIC_TRANSMITTER_INFO; | 5881 | }ASIC_TRANSMITTER_INFO; |
5184 | 5882 | ||
5883 | #define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01 | ||
5884 | #define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02 | ||
5885 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4 | ||
5886 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00 | ||
5887 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04 | ||
5888 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40 | ||
5889 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44 | ||
5890 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80 | ||
5891 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84 | ||
5892 | |||
5185 | typedef struct _ASIC_ENCODER_INFO | 5893 | typedef struct _ASIC_ENCODER_INFO |
5186 | { | 5894 | { |
5187 | UCHAR ucEncoderID; | 5895 | UCHAR ucEncoderID; |
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS | |||
5284 | /* /obsolete */ | 5992 | /* /obsolete */
5285 | #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS | 5993 | #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS |
5286 | 5994 | ||
5995 | |||
5996 | typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2 | ||
5997 | { | ||
5998 | USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION | ||
5999 | UCHAR ucAuxId; | ||
6000 | UCHAR ucAction; | ||
6001 | UCHAR ucSinkType; // Input and Output parameters. | ||
6002 | UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION | ||
6003 | UCHAR ucReserved[2]; | ||
6004 | }DP_ENCODER_SERVICE_PARAMETERS_V2; | ||
6005 | |||
6006 | typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2 | ||
6007 | { | ||
6008 | DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam; | ||
6009 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam; | ||
6010 | }DP_ENCODER_SERVICE_PS_ALLOCATION_V2; | ||
6011 | |||
6012 | // ucAction | ||
6013 | #define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01 | ||
6014 | #define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02 | ||
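A minimal sketch of how a caller might fill DP_ENCODER_SERVICE_PARAMETERS_V2 for a sink-type query. The call into the DPEncoderService command table is elided, since it depends on the ATOM interpreter glue; the function name and aux_id parameter are illustrative assumptions.

#include <string.h>

static int query_dp_sink_type(UCHAR aux_id)
{
	DP_ENCODER_SERVICE_PARAMETERS_V2 args;

	memset(&args, 0, sizeof(args));
	args.ucAuxId  = aux_id;  /* AUX channel of the connector */
	args.ucAction = DP_SERVICE_V2_ACTION_GET_SINK_TYPE;
	/* ... execute the DPEncoderService command table on &args here ... */
	return args.ucSinkType;  /* in/out field: result comes back here */
}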
6015 | |||
6016 | |||
5287 | // DP_TRAINING_TABLE | 6017 | // DP_TRAINING_TABLE |
5288 | #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR | 6018 | #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR |
5289 | #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) | 6019 | #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) |
@@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 | |||
5339 | #define SELECT_DCIO_IMPCAL 4 | 6069 | #define SELECT_DCIO_IMPCAL 4 |
5340 | #define SELECT_DCIO_DIG 6 | 6070 | #define SELECT_DCIO_DIG 6 |
5341 | #define SELECT_CRTC_PIXEL_RATE 7 | 6071 | #define SELECT_CRTC_PIXEL_RATE 7 |
6072 | #define SELECT_VGA_BLK 8 | ||
5342 | 6073 | ||
5343 | /****************************************************************************/ | 6074 | /****************************************************************************/ |
5344 | //Portion VI: Definitions for vbios MC scratch registers that the driver uses | 6075 | //Portion VI: Definitions for vbios MC scratch registers that the driver uses
@@ -5744,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER | |||
5744 | #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 | 6475 | #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 |
5745 | #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 | 6476 | #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 |
5746 | #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 | 6477 | #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 |
6478 | #define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. | ||
6479 | #define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally | ||
6480 | #define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 | ||
6481 | |||
6482 | // Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. | ||
6483 | // We probably should reserve the bit 0x80 for this use. | ||
6484 | // To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). | ||
6485 | // The driver can pick the correct internal controller based on the ASIC. | ||
6486 | |||
5747 | #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller | 6487 | #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller |
6488 | #define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller | ||
5748 | 6489 | ||
5749 | typedef struct _ATOM_PPLIB_STATE | 6490 | typedef struct _ATOM_PPLIB_STATE |
5750 | { | 6491 | { |
@@ -5841,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 | |||
5841 | USHORT usExtendendedHeaderOffset; | 6582 | USHORT usExtendendedHeaderOffset; |
5842 | } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; | 6583 | } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; |
5843 | 6584 | ||
6585 | typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 | ||
6586 | { | ||
6587 | ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; | ||
6588 | ULONG ulGoldenPPID; // PPGen use only | ||
6589 | ULONG ulGoldenRevision; // PPGen use only | ||
6590 | USHORT usVddcDependencyOnSCLKOffset; | ||
6591 | USHORT usVddciDependencyOnMCLKOffset; | ||
6592 | USHORT usVddcDependencyOnMCLKOffset; | ||
6593 | USHORT usMaxClockVoltageOnDCOffset; | ||
6594 | USHORT usReserved[2]; | ||
6595 | } ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; | ||
6596 | |||
6597 | typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 | ||
6598 | { | ||
6599 | ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; | ||
6600 | ULONG ulTDPLimit; | ||
6601 | ULONG ulNearTDPLimit; | ||
6602 | ULONG ulSQRampingThreshold; | ||
6603 | USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table | ||
6604 | ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed. | ||
6605 | ULONG ulReserved; | ||
6606 | } ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; | ||
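Each PowerPlay table revision embeds its predecessor as its first member, so a pointer to a newer table can always be read through an older layout. A sketch of the downcast, assuming a valid v5 table:

static const ATOM_PPLIB_POWERPLAYTABLE3 *
pp_table_as_v3(const ATOM_PPLIB_POWERPLAYTABLE5 *t5)
{
	/* v5 starts with v4, which starts with v3, so this is a plain
	 * prefix view of the same memory, not a copy. */
	return &t5->basicTable4.basicTable3;
}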
6607 | |||
5844 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification | 6608 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification |
5845 | #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 | 6609 | #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 |
5846 | #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 | 6610 | #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 |
@@ -5864,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 | |||
5864 | #define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 | 6628 | #define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 |
5865 | #define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 | 6629 | #define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 |
5866 | 6630 | ||
6631 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 | ||
6632 | #define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 | ||
6633 | #define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 | ||
6634 | |||
5867 | //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings | 6635 | //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings |
5868 | #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 | 6636 | #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 |
5869 | #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 | 6637 | #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 |
@@ -5896,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 | |||
5896 | #define ATOM_PPLIB_M3ARB_MASK 0x00060000 | 6664 | #define ATOM_PPLIB_M3ARB_MASK 0x00060000 |
5897 | #define ATOM_PPLIB_M3ARB_SHIFT 17 | 6665 | #define ATOM_PPLIB_M3ARB_SHIFT 17 |
5898 | 6666 | ||
6667 | #define ATOM_PPLIB_ENABLE_DRR 0x00080000 | ||
6668 | |||
6669 | // remaining 16 bits are reserved | ||
6670 | typedef struct _ATOM_PPLIB_THERMAL_STATE | ||
6671 | { | ||
6672 | UCHAR ucMinTemperature; | ||
6673 | UCHAR ucMaxTemperature; | ||
6674 | UCHAR ucThermalAction; | ||
6675 | }ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; | ||
6676 | |||
5899 | // Contained in an array starting at the offset | 6677 | // Contained in an array starting at the offset |
5900 | // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. | 6678 | // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. |
5901 | // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex | 6679 | // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex |
6680 | #define ATOM_PPLIB_NONCLOCKINFO_VER1 12 | ||
6681 | #define ATOM_PPLIB_NONCLOCKINFO_VER2 24 | ||
5902 | typedef struct _ATOM_PPLIB_NONCLOCK_INFO | 6682 | typedef struct _ATOM_PPLIB_NONCLOCK_INFO |
5903 | { | 6683 | { |
5904 | USHORT usClassification; | 6684 | USHORT usClassification; |
@@ -5906,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO | |||
5906 | UCHAR ucMaxTemperature; | 6686 | UCHAR ucMaxTemperature; |
5907 | ULONG ulCapsAndSettings; | 6687 | ULONG ulCapsAndSettings; |
5908 | UCHAR ucRequiredPower; | 6688 | UCHAR ucRequiredPower; |
5909 | UCHAR ucUnused1[3]; | 6689 | USHORT usClassification2; |
6690 | ULONG ulVCLK; | ||
6691 | ULONG ulDCLK; | ||
6692 | UCHAR ucUnused[5]; | ||
5910 | } ATOM_PPLIB_NONCLOCK_INFO; | 6693 | } ATOM_PPLIB_NONCLOCK_INFO; |
5911 | 6694 | ||
5912 | // Contained in an array starting at the offset | 6695 | // Contained in an array starting at the offset |
5913 | // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. | 6696 | // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. |
5914 | // referenced from ATOM_PPLIB_STATE::ucClockStateIndices | 6697 | // referenced from ATOM_PPLIB_STATE::ucClockStateIndices |
5915 | #define ATOM_PPLIB_NONCLOCKINFO_VER1 12 | ||
5916 | #define ATOM_PPLIB_NONCLOCKINFO_VER2 24 | ||
5917 | |||
5918 | typedef struct _ATOM_PPLIB_R600_CLOCK_INFO | 6698 | typedef struct _ATOM_PPLIB_R600_CLOCK_INFO |
5919 | { | 6699 | { |
5920 | USHORT usEngineClockLow; | 6700 | USHORT usEngineClockLow; |
@@ -5985,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO | |||
5985 | #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 | 6765 | #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 |
5986 | #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 | 6766 | #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 |
5987 | 6767 | ||
6768 | typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ | ||
6769 | USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10kHz | ||
6770 | UCHAR ucEngineClockHigh; //clock frequency >> 16. | ||
6771 | UCHAR vddcIndex; //2-bit vddc index; | ||
6772 | UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value | ||
6773 | //please initialize to 0 | ||
6774 | UCHAR rsv; | ||
6775 | //please initialize to 0 | ||
6776 | USHORT rsv1; | ||
6777 | //please initialize to 0s | ||
6778 | ULONG rsv2[2]; | ||
6779 | }ATOM_PPLIB_SUMO_CLOCK_INFO; | ||
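The engine clock is split across a 16-bit low part and an 8-bit high part, per the comments above, so reassembly is a shift and an OR (10 kHz units). A minimal sketch:

static unsigned long
sumo_engine_clock_10khz(const ATOM_PPLIB_SUMO_CLOCK_INFO *ci)
{
	return ((unsigned long)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
}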
6780 | |||
6781 | |||
6782 | |||
6783 | typedef struct _ATOM_PPLIB_STATE_V2 | ||
6784 | { | ||
6785 | //number of valid dpm levels in this state; Driver uses it to calculate the whole | ||
6786 | //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) | ||
6787 | UCHAR ucNumDPMLevels; | ||
6788 | |||
6789 | //an index into the array of nonClockInfos | ||
6790 | UCHAR nonClockInfoIndex; | ||
6791 | /** | ||
6792 | * Driver will read the first ucNumDPMLevels in this array | ||
6793 | */ | ||
6794 | UCHAR clockInfoIndex[1]; | ||
6795 | } ATOM_PPLIB_STATE_V2; | ||
6796 | |||
6797 | typedef struct StateArray{ | ||
6798 | //how many states we have | ||
6799 | UCHAR ucNumEntries; | ||
6800 | |||
6801 | ATOM_PPLIB_STATE_V2 states[1]; | ||
6802 | }StateArray; | ||
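Because ATOM_PPLIB_STATE_V2 ends in a variable-length clockInfoIndex array, the states in StateArray cannot be indexed as a C array; each entry is sized per the formula in its own comment. A walking sketch:

static void walk_states(const StateArray *sa)
{
	const unsigned char *p = (const unsigned char *)&sa->states[0];
	unsigned int i;

	for (i = 0; i < sa->ucNumEntries; i++) {
		const ATOM_PPLIB_STATE_V2 *st = (const ATOM_PPLIB_STATE_V2 *)p;
		/* sizeof() already covers clockInfoIndex[1], hence the "- 1". */
		p += sizeof(ATOM_PPLIB_STATE_V2) +
		     (st->ucNumDPMLevels - 1) * sizeof(UCHAR);
	}
}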
6803 | |||
6804 | |||
6805 | typedef struct ClockInfoArray{ | ||
6806 | //how many clock levels we have | ||
6807 | UCHAR ucNumEntries; | ||
6808 | |||
6809 | //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO) | ||
6810 | UCHAR ucEntrySize; | ||
6811 | |||
6812 | //this is for Sumo | ||
6813 | ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1]; | ||
6814 | }ClockInfoArray; | ||
6815 | |||
6816 | typedef struct NonClockInfoArray{ | ||
6817 | |||
6818 | //how many non-clock levels we have; normally the same as the number of states | ||
6819 | UCHAR ucNumEntries; | ||
6820 | //sizeof(ATOM_PPLIB_NONCLOCK_INFO) | ||
6821 | UCHAR ucEntrySize; | ||
6822 | |||
6823 | ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; | ||
6824 | }NonClockInfoArray; | ||
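NonClockInfoArray records ucEntrySize because the entry layout grew from 12 bytes (ATOM_PPLIB_NONCLOCKINFO_VER1) to 24 (VER2), so entries must be stepped by the recorded size rather than sizeof(). A lookup sketch, assuming idx has been validated against ucNumEntries:

static const ATOM_PPLIB_NONCLOCK_INFO *
nonclock_entry(const NonClockInfoArray *arr, unsigned int idx)
{
	const unsigned char *base = (const unsigned char *)&arr->nonClockInfo[0];

	return (const ATOM_PPLIB_NONCLOCK_INFO *)(base + idx * arr->ucEntrySize);
}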
6825 | |||
6826 | typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record | ||
6827 | { | ||
6828 | USHORT usClockLow; | ||
6829 | UCHAR ucClockHigh; | ||
6830 | USHORT usVoltage; | ||
6831 | }ATOM_PPLIB_Clock_Voltage_Dependency_Record; | ||
6832 | |||
6833 | typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table | ||
6834 | { | ||
6835 | UCHAR ucNumEntries; // Number of entries. | ||
6836 | ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. | ||
6837 | }ATOM_PPLIB_Clock_Voltage_Dependency_Table; | ||
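The dependency records use the same low/high clock split as the clock-info structures above, so each entry decodes to a (clock, voltage) pair. A sketch, assuming 10 kHz clock units as elsewhere in this header; since these records are fixed-size, plain array indexing is fine here:

static void
walk_clock_voltage_deps(const ATOM_PPLIB_Clock_Voltage_Dependency_Table *t)
{
	unsigned int i;

	for (i = 0; i < t->ucNumEntries; i++) {
		const ATOM_PPLIB_Clock_Voltage_Dependency_Record *r = &t->entries[i];
		unsigned long clk_10khz =
			((unsigned long)r->ucClockHigh << 16) | r->usClockLow;
		unsigned int voltage = r->usVoltage;

		(void)clk_10khz; (void)voltage;
	}
}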
6838 | |||
6839 | typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record | ||
6840 | { | ||
6841 | USHORT usSclkLow; | ||
6842 | UCHAR ucSclkHigh; | ||
6843 | USHORT usMclkLow; | ||
6844 | UCHAR ucMclkHigh; | ||
6845 | USHORT usVddc; | ||
6846 | USHORT usVddci; | ||
6847 | }ATOM_PPLIB_Clock_Voltage_Limit_Record; | ||
6848 | |||
6849 | typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table | ||
6850 | { | ||
6851 | UCHAR ucNumEntries; // Number of entries. | ||
6852 | ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. | ||
6853 | }ATOM_PPLIB_Clock_Voltage_Limit_Table; | ||
6854 | |||
5988 | /**************************************************************************/ | 6855 | /**************************************************************************/ |
5989 | 6856 | ||
5990 | 6857 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4dc5b4714c5a..f7d7477daffb 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -40,6 +40,61 @@ | |||
40 | static void evergreen_gpu_init(struct radeon_device *rdev); | 40 | static void evergreen_gpu_init(struct radeon_device *rdev); |
41 | void evergreen_fini(struct radeon_device *rdev); | 41 | void evergreen_fini(struct radeon_device *rdev); |
42 | 42 | ||
43 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) | ||
44 | { | ||
45 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
46 | u32 tmp; | ||
47 | |||
48 | /* make sure flip happens in vblank rather than hblank */ | ||
49 | tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); | ||
50 | tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; | ||
51 | WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); | ||
52 | |||
53 | /* set pageflip to happen anywhere in vblank interval */ | ||
54 | WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); | ||
55 | |||
56 | /* enable the pflip int */ | ||
57 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | ||
58 | } | ||
59 | |||
60 | void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) | ||
61 | { | ||
62 | /* disable the pflip int */ | ||
63 | radeon_irq_kms_pflip_irq_put(rdev, crtc); | ||
64 | } | ||
65 | |||
66 | u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
67 | { | ||
68 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
69 | u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); | ||
70 | |||
71 | /* Lock the graphics update lock */ | ||
72 | tmp |= EVERGREEN_GRPH_UPDATE_LOCK; | ||
73 | WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
74 | |||
75 | /* update the scanout addresses */ | ||
76 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, | ||
77 | upper_32_bits(crtc_base)); | ||
78 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
79 | (u32)crtc_base); | ||
80 | |||
81 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, | ||
82 | upper_32_bits(crtc_base)); | ||
83 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
84 | (u32)crtc_base); | ||
85 | |||
86 | /* Wait for update_pending to go high. */ | ||
87 | while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); | ||
88 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
89 | |||
90 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
91 | tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; | ||
92 | WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
93 | |||
94 | /* Return current update_pending status: */ | ||
95 | return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; | ||
96 | } | ||
97 | |||
43 | /* get temperature in millidegrees */ | 98 | /* get temperature in millidegrees */ |
44 | u32 evergreen_get_temp(struct radeon_device *rdev) | 99 | u32 evergreen_get_temp(struct radeon_device *rdev) |
45 | { | 100 | { |
@@ -57,6 +112,14 @@ u32 evergreen_get_temp(struct radeon_device *rdev) | |||
57 | return actual_temp * 1000; | 112 | return actual_temp * 1000; |
58 | } | 113 | } |
59 | 114 | ||
115 | u32 sumo_get_temp(struct radeon_device *rdev) | ||
116 | { | ||
117 | u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; | ||
118 | u32 actual_temp = (temp >> 1) & 0xff; | ||
119 | |||
120 | return actual_temp * 1000; | ||
121 | } | ||
122 | |||
60 | void evergreen_pm_misc(struct radeon_device *rdev) | 123 | void evergreen_pm_misc(struct radeon_device *rdev) |
61 | { | 124 | { |
62 | int req_ps_idx = rdev->pm.requested_power_state_index; | 125 | int req_ps_idx = rdev->pm.requested_power_state_index; |
@@ -888,31 +951,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa | |||
888 | save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); | 951 | save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); |
889 | save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); | 952 | save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); |
890 | save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); | 953 | save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); |
891 | save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); | 954 | if (!(rdev->flags & RADEON_IS_IGP)) { |
892 | save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); | 955 | save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); |
893 | save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); | 956 | save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); |
894 | save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | 957 | save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); |
958 | save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
959 | } | ||
895 | 960 | ||
896 | /* Stop all video */ | 961 | /* Stop all video */ |
897 | WREG32(VGA_RENDER_CONTROL, 0); | 962 | WREG32(VGA_RENDER_CONTROL, 0); |
898 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); | 963 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); |
899 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); | 964 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); |
900 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); | 965 | if (!(rdev->flags & RADEON_IS_IGP)) { |
901 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); | 966 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); |
902 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | 967 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); |
903 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | 968 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); |
969 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | ||
970 | } | ||
904 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 971 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
905 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 972 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
906 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 973 | if (!(rdev->flags & RADEON_IS_IGP)) { |
907 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 974 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
908 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 975 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
909 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 976 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
977 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
978 | } | ||
910 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 979 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
911 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 980 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
912 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 981 | if (!(rdev->flags & RADEON_IS_IGP)) { |
913 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 982 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
914 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 983 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
915 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 984 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
985 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
986 | } | ||
916 | 987 | ||
917 | WREG32(D1VGA_CONTROL, 0); | 988 | WREG32(D1VGA_CONTROL, 0); |
918 | WREG32(D2VGA_CONTROL, 0); | 989 | WREG32(D2VGA_CONTROL, 0); |
@@ -942,41 +1013,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ | |||
942 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, | 1013 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, |
943 | (u32)rdev->mc.vram_start); | 1014 | (u32)rdev->mc.vram_start); |
944 | 1015 | ||
945 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1016 | if (!(rdev->flags & RADEON_IS_IGP)) { |
946 | upper_32_bits(rdev->mc.vram_start)); | 1017 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, |
947 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1018 | upper_32_bits(rdev->mc.vram_start)); |
948 | upper_32_bits(rdev->mc.vram_start)); | 1019 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, |
949 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1020 | upper_32_bits(rdev->mc.vram_start)); |
950 | (u32)rdev->mc.vram_start); | 1021 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, |
951 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1022 | (u32)rdev->mc.vram_start); |
952 | (u32)rdev->mc.vram_start); | 1023 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, |
953 | 1024 | (u32)rdev->mc.vram_start); | |
954 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1025 | |
955 | upper_32_bits(rdev->mc.vram_start)); | 1026 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, |
956 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1027 | upper_32_bits(rdev->mc.vram_start)); |
957 | upper_32_bits(rdev->mc.vram_start)); | 1028 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, |
958 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1029 | upper_32_bits(rdev->mc.vram_start)); |
959 | (u32)rdev->mc.vram_start); | 1030 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, |
960 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1031 | (u32)rdev->mc.vram_start); |
961 | (u32)rdev->mc.vram_start); | 1032 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, |
962 | 1033 | (u32)rdev->mc.vram_start); | |
963 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1034 | |
964 | upper_32_bits(rdev->mc.vram_start)); | 1035 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, |
965 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1036 | upper_32_bits(rdev->mc.vram_start)); |
966 | upper_32_bits(rdev->mc.vram_start)); | 1037 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, |
967 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1038 | upper_32_bits(rdev->mc.vram_start)); |
968 | (u32)rdev->mc.vram_start); | 1039 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, |
969 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1040 | (u32)rdev->mc.vram_start); |
970 | (u32)rdev->mc.vram_start); | 1041 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, |
971 | 1042 | (u32)rdev->mc.vram_start); | |
972 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1043 | |
973 | upper_32_bits(rdev->mc.vram_start)); | 1044 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, |
974 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1045 | upper_32_bits(rdev->mc.vram_start)); |
975 | upper_32_bits(rdev->mc.vram_start)); | 1046 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, |
976 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1047 | upper_32_bits(rdev->mc.vram_start)); |
977 | (u32)rdev->mc.vram_start); | 1048 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, |
978 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1049 | (u32)rdev->mc.vram_start); |
979 | (u32)rdev->mc.vram_start); | 1050 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, |
1051 | (u32)rdev->mc.vram_start); | ||
1052 | } | ||
980 | 1053 | ||
981 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); | 1054 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); |
982 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); | 1055 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); |
@@ -992,22 +1065,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ | |||
992 | WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); | 1065 | WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); |
993 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); | 1066 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); |
994 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); | 1067 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); |
995 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); | 1068 | if (!(rdev->flags & RADEON_IS_IGP)) { |
996 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); | 1069 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); |
997 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | 1070 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); |
998 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | 1071 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); |
1072 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | ||
1073 | } | ||
999 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); | 1074 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); |
1000 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); | 1075 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); |
1001 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); | 1076 | if (!(rdev->flags & RADEON_IS_IGP)) { |
1002 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); | 1077 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); |
1003 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); | 1078 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); |
1004 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); | 1079 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); |
1080 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); | ||
1081 | } | ||
1005 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 1082 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
1006 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 1083 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
1007 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 1084 | if (!(rdev->flags & RADEON_IS_IGP)) { |
1008 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 1085 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
1009 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 1086 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
1010 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 1087 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
1088 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
1089 | } | ||
1011 | WREG32(VGA_RENDER_CONTROL, save->vga_render_control); | 1090 | WREG32(VGA_RENDER_CONTROL, save->vga_render_control); |
1012 | } | 1091 | } |
1013 | 1092 | ||
@@ -1055,6 +1134,12 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
1055 | rdev->mc.vram_end >> 12); | 1134 | rdev->mc.vram_end >> 12); |
1056 | } | 1135 | } |
1057 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 1136 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
1137 | if (rdev->flags & RADEON_IS_IGP) { | ||
1138 | tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; | ||
1139 | tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; | ||
1140 | tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; | ||
1141 | WREG32(MC_FUS_VM_FB_OFFSET, tmp); | ||
1142 | } | ||
1058 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; | 1143 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
1059 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 1144 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
1060 | WREG32(MC_VM_FB_LOCATION, tmp); | 1145 | WREG32(MC_VM_FB_LOCATION, tmp); |
@@ -1283,6 +1368,7 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1283 | switch (rdev->family) { | 1368 | switch (rdev->family) { |
1284 | case CHIP_CEDAR: | 1369 | case CHIP_CEDAR: |
1285 | case CHIP_REDWOOD: | 1370 | case CHIP_REDWOOD: |
1371 | case CHIP_PALM: | ||
1286 | force_no_swizzle = false; | 1372 | force_no_swizzle = false; |
1287 | break; | 1373 | break; |
1288 | case CHIP_CYPRESS: | 1374 | case CHIP_CYPRESS: |
@@ -1382,6 +1468,43 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1382 | return backend_map; | 1468 | return backend_map; |
1383 | } | 1469 | } |
1384 | 1470 | ||
1471 | static void evergreen_program_channel_remap(struct radeon_device *rdev) | ||
1472 | { | ||
1473 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
1474 | |||
1475 | tmp = RREG32(MC_SHARED_CHMAP); | ||
1476 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
1477 | case 0: | ||
1478 | case 1: | ||
1479 | case 2: | ||
1480 | case 3: | ||
1481 | default: | ||
1482 | /* default mapping */ | ||
1483 | mc_shared_chremap = 0x00fac688; | ||
1484 | break; | ||
1485 | } | ||
1486 | |||
1487 | switch (rdev->family) { | ||
1488 | case CHIP_HEMLOCK: | ||
1489 | case CHIP_CYPRESS: | ||
1490 | tcp_chan_steer_lo = 0x54763210; | ||
1491 | tcp_chan_steer_hi = 0x0000ba98; | ||
1492 | break; | ||
1493 | case CHIP_JUNIPER: | ||
1494 | case CHIP_REDWOOD: | ||
1495 | case CHIP_CEDAR: | ||
1496 | case CHIP_PALM: | ||
1497 | default: | ||
1498 | tcp_chan_steer_lo = 0x76543210; | ||
1499 | tcp_chan_steer_hi = 0x0000ba98; | ||
1500 | break; | ||
1501 | } | ||
1502 | |||
1503 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
1504 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
1505 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
1506 | } | ||
1507 | |||
1385 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1508 | static void evergreen_gpu_init(struct radeon_device *rdev) |
1386 | { | 1509 | { |
1387 | u32 cc_rb_backend_disable = 0; | 1510 | u32 cc_rb_backend_disable = 0; |
@@ -1493,6 +1616,27 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1493 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1616 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1494 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1617 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1495 | break; | 1618 | break; |
1619 | case CHIP_PALM: | ||
1620 | rdev->config.evergreen.num_ses = 1; | ||
1621 | rdev->config.evergreen.max_pipes = 2; | ||
1622 | rdev->config.evergreen.max_tile_pipes = 2; | ||
1623 | rdev->config.evergreen.max_simds = 2; | ||
1624 | rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; | ||
1625 | rdev->config.evergreen.max_gprs = 256; | ||
1626 | rdev->config.evergreen.max_threads = 192; | ||
1627 | rdev->config.evergreen.max_gs_threads = 16; | ||
1628 | rdev->config.evergreen.max_stack_entries = 256; | ||
1629 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1630 | rdev->config.evergreen.sx_max_export_size = 128; | ||
1631 | rdev->config.evergreen.sx_max_export_pos_size = 32; | ||
1632 | rdev->config.evergreen.sx_max_export_smx_size = 96; | ||
1633 | rdev->config.evergreen.max_hw_contexts = 4; | ||
1634 | rdev->config.evergreen.sq_num_cf_insts = 1; | ||
1635 | |||
1636 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | ||
1637 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1638 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1639 | break; | ||
1496 | } | 1640 | } |
1497 | 1641 | ||
1498 | /* Initialize HDP */ | 1642 | /* Initialize HDP */ |
@@ -1685,6 +1829,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1685 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 1829 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
1686 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 1830 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
1687 | 1831 | ||
1832 | evergreen_program_channel_remap(rdev); | ||
1833 | |||
1688 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | 1834 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; |
1689 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | 1835 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; |
1690 | 1836 | ||
@@ -1767,9 +1913,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1767 | GS_PRIO(2) | | 1913 | GS_PRIO(2) | |
1768 | ES_PRIO(3)); | 1914 | ES_PRIO(3)); |
1769 | 1915 | ||
1770 | if (rdev->family == CHIP_CEDAR) | 1916 | switch (rdev->family) { |
1917 | case CHIP_CEDAR: | ||
1918 | case CHIP_PALM: | ||
1771 | /* no vertex cache */ | 1919 | /* no vertex cache */ |
1772 | sq_config &= ~VC_ENABLE; | 1920 | sq_config &= ~VC_ENABLE; |
1921 | break; | ||
1922 | default: | ||
1923 | break; | ||
1924 | } | ||
1773 | 1925 | ||
1774 | sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); | 1926 | sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); |
1775 | 1927 | ||
@@ -1781,10 +1933,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1781 | sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); | 1933 | sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); |
1782 | sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); | 1934 | sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); |
1783 | 1935 | ||
1784 | if (rdev->family == CHIP_CEDAR) | 1936 | switch (rdev->family) { |
1937 | case CHIP_CEDAR: | ||
1938 | case CHIP_PALM: | ||
1785 | ps_thread_count = 96; | 1939 | ps_thread_count = 96; |
1786 | else | 1940 | break; |
1941 | default: | ||
1787 | ps_thread_count = 128; | 1942 | ps_thread_count = 128; |
1943 | break; | ||
1944 | } | ||
1788 | 1945 | ||
1789 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); | 1946 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); |
1790 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); | 1947 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
@@ -1815,10 +1972,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1815 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | | 1972 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | |
1816 | FORCE_EOV_MAX_REZ_CNT(255))); | 1973 | FORCE_EOV_MAX_REZ_CNT(255))); |
1817 | 1974 | ||
1818 | if (rdev->family == CHIP_CEDAR) | 1975 | switch (rdev->family) { |
1976 | case CHIP_CEDAR: | ||
1977 | case CHIP_PALM: | ||
1819 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); | 1978 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); |
1820 | else | 1979 | break; |
1980 | default: | ||
1821 | vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); | 1981 | vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); |
1982 | break; | ||
1983 | } | ||
1822 | vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); | 1984 | vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); |
1823 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); | 1985 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); |
1824 | 1986 | ||
@@ -1902,12 +2064,18 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
1902 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 2064 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
1903 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 2065 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
1904 | /* Setup GPU memory space */ | 2066 | /* Setup GPU memory space */ |
1905 | /* size in MB on evergreen */ | 2067 | if (rdev->flags & RADEON_IS_IGP) { |
1906 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 2068 | /* size in bytes on fusion */ |
1907 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 2069 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
2070 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | ||
2071 | } else { | ||
2072 | /* size in MB on evergreen */ | ||
2073 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | ||
2074 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | ||
2075 | } | ||
1908 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2076 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1909 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 2077 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; |
1910 | r600_vram_gtt_location(rdev, &rdev->mc); | 2078 | r700_vram_gtt_location(rdev, &rdev->mc); |
1911 | radeon_update_bandwidth_info(rdev); | 2079 | radeon_update_bandwidth_info(rdev); |
1912 | 2080 | ||
1913 | return 0; | 2081 | return 0; |
@@ -2024,17 +2192,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |||
2024 | WREG32(GRBM_INT_CNTL, 0); | 2192 | WREG32(GRBM_INT_CNTL, 0); |
2025 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 2193 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
2026 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 2194 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
2027 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 2195 | if (!(rdev->flags & RADEON_IS_IGP)) { |
2028 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 2196 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
2029 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 2197 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
2030 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 2198 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
2199 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
2200 | } | ||
2031 | 2201 | ||
2032 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 2202 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
2033 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 2203 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
2034 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 2204 | if (!(rdev->flags & RADEON_IS_IGP)) { |
2035 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 2205 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
2036 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 2206 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
2037 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 2207 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
2208 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
2209 | } | ||
2038 | 2210 | ||
2039 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | 2211 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); |
2040 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | 2212 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); |
@@ -2060,6 +2232,7 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2060 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 2232 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
2061 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | 2233 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
2062 | u32 grbm_int_cntl = 0; | 2234 | u32 grbm_int_cntl = 0; |
2235 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | ||
2063 | 2236 | ||
2064 | if (!rdev->irq.installed) { | 2237 | if (!rdev->irq.installed) { |
2065 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); | 2238 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
@@ -2085,27 +2258,33 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2085 | cp_int_cntl |= RB_INT_ENABLE; | 2258 | cp_int_cntl |= RB_INT_ENABLE; |
2086 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | 2259 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; |
2087 | } | 2260 | } |
2088 | if (rdev->irq.crtc_vblank_int[0]) { | 2261 | if (rdev->irq.crtc_vblank_int[0] || |
2262 | rdev->irq.pflip[0]) { | ||
2089 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); | 2263 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); |
2090 | crtc1 |= VBLANK_INT_MASK; | 2264 | crtc1 |= VBLANK_INT_MASK; |
2091 | } | 2265 | } |
2092 | if (rdev->irq.crtc_vblank_int[1]) { | 2266 | if (rdev->irq.crtc_vblank_int[1] || |
2267 | rdev->irq.pflip[1]) { | ||
2093 | DRM_DEBUG("evergreen_irq_set: vblank 1\n"); | 2268 | DRM_DEBUG("evergreen_irq_set: vblank 1\n"); |
2094 | crtc2 |= VBLANK_INT_MASK; | 2269 | crtc2 |= VBLANK_INT_MASK; |
2095 | } | 2270 | } |
2096 | if (rdev->irq.crtc_vblank_int[2]) { | 2271 | if (rdev->irq.crtc_vblank_int[2] || |
2272 | rdev->irq.pflip[2]) { | ||
2097 | DRM_DEBUG("evergreen_irq_set: vblank 2\n"); | 2273 | DRM_DEBUG("evergreen_irq_set: vblank 2\n"); |
2098 | crtc3 |= VBLANK_INT_MASK; | 2274 | crtc3 |= VBLANK_INT_MASK; |
2099 | } | 2275 | } |
2100 | if (rdev->irq.crtc_vblank_int[3]) { | 2276 | if (rdev->irq.crtc_vblank_int[3] || |
2277 | rdev->irq.pflip[3]) { | ||
2101 | DRM_DEBUG("evergreen_irq_set: vblank 3\n"); | 2278 | DRM_DEBUG("evergreen_irq_set: vblank 3\n"); |
2102 | crtc4 |= VBLANK_INT_MASK; | 2279 | crtc4 |= VBLANK_INT_MASK; |
2103 | } | 2280 | } |
2104 | if (rdev->irq.crtc_vblank_int[4]) { | 2281 | if (rdev->irq.crtc_vblank_int[4] || |
2282 | rdev->irq.pflip[4]) { | ||
2105 | DRM_DEBUG("evergreen_irq_set: vblank 4\n"); | 2283 | DRM_DEBUG("evergreen_irq_set: vblank 4\n"); |
2106 | crtc5 |= VBLANK_INT_MASK; | 2284 | crtc5 |= VBLANK_INT_MASK; |
2107 | } | 2285 | } |
2108 | if (rdev->irq.crtc_vblank_int[5]) { | 2286 | if (rdev->irq.crtc_vblank_int[5] || |
2287 | rdev->irq.pflip[5]) { | ||
2109 | DRM_DEBUG("evergreen_irq_set: vblank 5\n"); | 2288 | DRM_DEBUG("evergreen_irq_set: vblank 5\n"); |
2110 | crtc6 |= VBLANK_INT_MASK; | 2289 | crtc6 |= VBLANK_INT_MASK; |
2111 | } | 2290 | } |
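Each CRTC's vblank interrupt is now armed when either consumer needs it: a registered vblank client or a pending page flip. The patch spells the test out six times; condensed, the rule is (sketch only, crtc_mask[] is a hypothetical stand-in for the crtc1..crtc6 locals):

	/* Sketch of the repeated enable rule above. */
	for (i = 0; i < 6; i++) {
		if (rdev->irq.crtc_vblank_int[i] || rdev->irq.pflip[i])
			crtc_mask[i] |= VBLANK_INT_MASK;
	}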
@@ -2143,10 +2322,19 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2143 | 2322 | ||
2144 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); | 2323 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); |
2145 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); | 2324 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); |
2146 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); | 2325 | if (!(rdev->flags & RADEON_IS_IGP)) { |
2147 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); | 2326 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); |
2148 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); | 2327 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); |
2149 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | 2328 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); |
2329 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | ||
2330 | } | ||
2331 | |||
2332 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | ||
2333 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | ||
2334 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | ||
2335 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | ||
2336 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | ||
2337 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | ||
2150 | 2338 | ||
2151 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 2339 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
2152 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | 2340 | WREG32(DC_HPD2_INT_CONTROL, hpd2); |
@@ -2158,79 +2346,92 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2158 | return 0; | 2346 | return 0; |
2159 | } | 2347 | } |
2160 | 2348 | ||
2161 | static inline void evergreen_irq_ack(struct radeon_device *rdev, | 2349 | static inline void evergreen_irq_ack(struct radeon_device *rdev) |
2162 | u32 *disp_int, | ||
2163 | u32 *disp_int_cont, | ||
2164 | u32 *disp_int_cont2, | ||
2165 | u32 *disp_int_cont3, | ||
2166 | u32 *disp_int_cont4, | ||
2167 | u32 *disp_int_cont5) | ||
2168 | { | 2350 | { |
2169 | u32 tmp; | 2351 | u32 tmp; |
2170 | 2352 | ||
2171 | *disp_int = RREG32(DISP_INTERRUPT_STATUS); | 2353 | rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); |
2172 | *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | 2354 | rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); |
2173 | *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); | 2355 | rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); |
2174 | *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); | 2356 | rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); |
2175 | *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); | 2357 | rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); |
2176 | *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); | 2358 | rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); |
2177 | 2359 | rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); | |
2178 | if (*disp_int & LB_D1_VBLANK_INTERRUPT) | 2360 | rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); |
2361 | rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
2362 | rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
2363 | rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
2364 | rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
2365 | |||
2366 | if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2367 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2368 | if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2369 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2370 | if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2371 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2372 | if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2373 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2374 | if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2375 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2376 | if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2377 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2378 | |||
2379 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) | ||
2179 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); | 2380 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); |
2180 | if (*disp_int & LB_D1_VLINE_INTERRUPT) | 2381 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) |
2181 | WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); | 2382 | WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); |
2182 | 2383 | ||
2183 | if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) | 2384 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) |
2184 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); | 2385 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); |
2185 | if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) | 2386 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) |
2186 | WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); | 2387 | WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); |
2187 | 2388 | ||
2188 | if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) | 2389 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) |
2189 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); | 2390 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); |
2190 | if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) | 2391 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) |
2191 | WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); | 2392 | WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); |
2192 | 2393 | ||
2193 | if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) | 2394 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) |
2194 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); | 2395 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); |
2195 | if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) | 2396 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) |
2196 | WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); | 2397 | WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); |
2197 | 2398 | ||
2198 | if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) | 2399 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) |
2199 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | 2400 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); |
2200 | if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | 2401 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) |
2201 | WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); | 2402 | WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); |
2202 | 2403 | ||
2203 | if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) | 2404 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) |
2204 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); | 2405 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); |
2205 | if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) | 2406 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) |
2206 | WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); | 2407 | WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); |
2207 | 2408 | ||
2208 | if (*disp_int & DC_HPD1_INTERRUPT) { | 2409 | if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { |
2209 | tmp = RREG32(DC_HPD1_INT_CONTROL); | 2410 | tmp = RREG32(DC_HPD1_INT_CONTROL); |
2210 | tmp |= DC_HPDx_INT_ACK; | 2411 | tmp |= DC_HPDx_INT_ACK; |
2211 | WREG32(DC_HPD1_INT_CONTROL, tmp); | 2412 | WREG32(DC_HPD1_INT_CONTROL, tmp); |
2212 | } | 2413 | } |
2213 | if (*disp_int_cont & DC_HPD2_INTERRUPT) { | 2414 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
2214 | tmp = RREG32(DC_HPD2_INT_CONTROL); | 2415 | tmp = RREG32(DC_HPD2_INT_CONTROL); |
2215 | tmp |= DC_HPDx_INT_ACK; | 2416 | tmp |= DC_HPDx_INT_ACK; |
2216 | WREG32(DC_HPD2_INT_CONTROL, tmp); | 2417 | WREG32(DC_HPD2_INT_CONTROL, tmp); |
2217 | } | 2418 | } |
2218 | if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { | 2419 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
2219 | tmp = RREG32(DC_HPD3_INT_CONTROL); | 2420 | tmp = RREG32(DC_HPD3_INT_CONTROL); |
2220 | tmp |= DC_HPDx_INT_ACK; | 2421 | tmp |= DC_HPDx_INT_ACK; |
2221 | WREG32(DC_HPD3_INT_CONTROL, tmp); | 2422 | WREG32(DC_HPD3_INT_CONTROL, tmp); |
2222 | } | 2423 | } |
2223 | if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { | 2424 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
2224 | tmp = RREG32(DC_HPD4_INT_CONTROL); | 2425 | tmp = RREG32(DC_HPD4_INT_CONTROL); |
2225 | tmp |= DC_HPDx_INT_ACK; | 2426 | tmp |= DC_HPDx_INT_ACK; |
2226 | WREG32(DC_HPD4_INT_CONTROL, tmp); | 2427 | WREG32(DC_HPD4_INT_CONTROL, tmp); |
2227 | } | 2428 | } |
2228 | if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { | 2429 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
2229 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 2430 | tmp = RREG32(DC_HPD5_INT_CONTROL); |
2230 | tmp |= DC_HPDx_INT_ACK; | 2431 | tmp |= DC_HPDx_INT_ACK; |
2231 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 2432 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
2232 | } | 2433 | } |
2233 | if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { | 2434 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
2234 | tmp = RREG32(DC_HPD6_INT_CONTROL); | 2435 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
2235 | tmp |= DC_HPDx_INT_ACK; | 2436 | tmp |= DC_HPDx_INT_ACK; |
2236 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 2437 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
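Moving the status words from on-stack locals into rdev->irq.stat_regs.evergreen lets evergreen_irq_ack() latch and acknowledge all display status in one pass, after which the IH loop consumes (and clears) the cached bits without touching hardware again. The shape of the split, roughly:

	/* Rough shape of the ack/consume split (illustrative): ack
	 * latches hw status into rdev and writes back the ack bits;
	 * the IH loop then tests and clears the cached copy.
	 */
	evergreen_irq_ack(rdev);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
		/* handle vblank and/or flip */
		rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
	}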
@@ -2239,14 +2440,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev, | |||
2239 | 2440 | ||
2240 | void evergreen_irq_disable(struct radeon_device *rdev) | 2441 | void evergreen_irq_disable(struct radeon_device *rdev) |
2241 | { | 2442 | { |
2242 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
2243 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | ||
2244 | |||
2245 | r600_disable_interrupts(rdev); | 2443 | r600_disable_interrupts(rdev); |
2246 | /* Wait and acknowledge irq */ | 2444 | /* Wait and acknowledge irq */ |
2247 | mdelay(1); | 2445 | mdelay(1); |
2248 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | 2446 | evergreen_irq_ack(rdev); |
2249 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | ||
2250 | evergreen_disable_interrupt_state(rdev); | 2447 | evergreen_disable_interrupt_state(rdev); |
2251 | } | 2448 | } |
2252 | 2449 | ||
@@ -2286,8 +2483,6 @@ int evergreen_irq_process(struct radeon_device *rdev) | |||
2286 | u32 rptr = rdev->ih.rptr; | 2483 | u32 rptr = rdev->ih.rptr; |
2287 | u32 src_id, src_data; | 2484 | u32 src_id, src_data; |
2288 | u32 ring_index; | 2485 | u32 ring_index; |
2289 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
2290 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | ||
2291 | unsigned long flags; | 2486 | unsigned long flags; |
2292 | bool queue_hotplug = false; | 2487 | bool queue_hotplug = false; |
2293 | 2488 | ||
@@ -2308,8 +2503,7 @@ int evergreen_irq_process(struct radeon_device *rdev) | |||
2308 | 2503 | ||
2309 | restart_ih: | 2504 | restart_ih: |
2310 | /* display interrupts */ | 2505 | /* display interrupts */ |
2311 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | 2506 | evergreen_irq_ack(rdev); |
2312 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | ||
2313 | 2507 | ||
2314 | rdev->ih.wptr = wptr; | 2508 | rdev->ih.wptr = wptr; |
2315 | while (rptr != wptr) { | 2509 | while (rptr != wptr) { |
@@ -2322,17 +2516,21 @@ restart_ih: | |||
2322 | case 1: /* D1 vblank/vline */ | 2516 | case 1: /* D1 vblank/vline */ |
2323 | switch (src_data) { | 2517 | switch (src_data) { |
2324 | case 0: /* D1 vblank */ | 2518 | case 0: /* D1 vblank */ |
2325 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 2519 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { |
2326 | drm_handle_vblank(rdev->ddev, 0); | 2520 | if (rdev->irq.crtc_vblank_int[0]) { |
2327 | rdev->pm.vblank_sync = true; | 2521 | drm_handle_vblank(rdev->ddev, 0); |
2328 | wake_up(&rdev->irq.vblank_queue); | 2522 | rdev->pm.vblank_sync = true; |
2329 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 2523 | wake_up(&rdev->irq.vblank_queue); |
2524 | } | ||
2525 | if (rdev->irq.pflip[0]) | ||
2526 | radeon_crtc_handle_flip(rdev, 0); | ||
2527 | rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
2330 | DRM_DEBUG("IH: D1 vblank\n"); | 2528 | DRM_DEBUG("IH: D1 vblank\n"); |
2331 | } | 2529 | } |
2332 | break; | 2530 | break; |
2333 | case 1: /* D1 vline */ | 2531 | case 1: /* D1 vline */ |
2334 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | 2532 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { |
2335 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | 2533 | rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
2336 | DRM_DEBUG("IH: D1 vline\n"); | 2534 | DRM_DEBUG("IH: D1 vline\n"); |
2337 | } | 2535 | } |
2338 | break; | 2536 | break; |
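The same handler split repeats for D1 through D6: the DRM vblank machinery only runs for registered vblank clients, flip completion runs whenever a flip is pending on that CRTC, and the cached status bit is cleared in either case so the event is consumed exactly once. Generalized (crtc is a hypothetical index; the patch writes each case out):

	if (rdev->irq.crtc_vblank_int[crtc]) {
		drm_handle_vblank(rdev->ddev, crtc);
		rdev->pm.vblank_sync = true;
		wake_up(&rdev->irq.vblank_queue);
	}
	if (rdev->irq.pflip[crtc])
		radeon_crtc_handle_flip(rdev, crtc);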
@@ -2344,17 +2542,21 @@ restart_ih: | |||
2344 | case 2: /* D2 vblank/vline */ | 2542 | case 2: /* D2 vblank/vline */ |
2345 | switch (src_data) { | 2543 | switch (src_data) { |
2346 | case 0: /* D2 vblank */ | 2544 | case 0: /* D2 vblank */ |
2347 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | 2545 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
2348 | drm_handle_vblank(rdev->ddev, 1); | 2546 | if (rdev->irq.crtc_vblank_int[1]) { |
2349 | rdev->pm.vblank_sync = true; | 2547 | drm_handle_vblank(rdev->ddev, 1); |
2350 | wake_up(&rdev->irq.vblank_queue); | 2548 | rdev->pm.vblank_sync = true; |
2351 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | 2549 | wake_up(&rdev->irq.vblank_queue); |
2550 | } | ||
2551 | if (rdev->irq.pflip[1]) | ||
2552 | radeon_crtc_handle_flip(rdev, 1); | ||
2553 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | ||
2352 | DRM_DEBUG("IH: D2 vblank\n"); | 2554 | DRM_DEBUG("IH: D2 vblank\n"); |
2353 | } | 2555 | } |
2354 | break; | 2556 | break; |
2355 | case 1: /* D2 vline */ | 2557 | case 1: /* D2 vline */ |
2356 | if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { | 2558 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { |
2357 | disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; | 2559 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; |
2358 | DRM_DEBUG("IH: D2 vline\n"); | 2560 | DRM_DEBUG("IH: D2 vline\n"); |
2359 | } | 2561 | } |
2360 | break; | 2562 | break; |
@@ -2366,17 +2568,21 @@ restart_ih: | |||
2366 | case 3: /* D3 vblank/vline */ | 2568 | case 3: /* D3 vblank/vline */ |
2367 | switch (src_data) { | 2569 | switch (src_data) { |
2368 | case 0: /* D3 vblank */ | 2570 | case 0: /* D3 vblank */ |
2369 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | 2571 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
2370 | drm_handle_vblank(rdev->ddev, 2); | 2572 | if (rdev->irq.crtc_vblank_int[2]) { |
2371 | rdev->pm.vblank_sync = true; | 2573 | drm_handle_vblank(rdev->ddev, 2); |
2372 | wake_up(&rdev->irq.vblank_queue); | 2574 | rdev->pm.vblank_sync = true; |
2373 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | 2575 | wake_up(&rdev->irq.vblank_queue); |
2576 | } | ||
2577 | if (rdev->irq.pflip[2]) | ||
2578 | radeon_crtc_handle_flip(rdev, 2); | ||
2579 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | ||
2374 | DRM_DEBUG("IH: D3 vblank\n"); | 2580 | DRM_DEBUG("IH: D3 vblank\n"); |
2375 | } | 2581 | } |
2376 | break; | 2582 | break; |
2377 | case 1: /* D3 vline */ | 2583 | case 1: /* D3 vline */ |
2378 | if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { | 2584 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { |
2379 | disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; | 2585 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; |
2380 | DRM_DEBUG("IH: D3 vline\n"); | 2586 | DRM_DEBUG("IH: D3 vline\n"); |
2381 | } | 2587 | } |
2382 | break; | 2588 | break; |
@@ -2388,17 +2594,21 @@ restart_ih: | |||
2388 | case 4: /* D4 vblank/vline */ | 2594 | case 4: /* D4 vblank/vline */ |
2389 | switch (src_data) { | 2595 | switch (src_data) { |
2390 | case 0: /* D4 vblank */ | 2596 | case 0: /* D4 vblank */ |
2391 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | 2597 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
2392 | drm_handle_vblank(rdev->ddev, 3); | 2598 | if (rdev->irq.crtc_vblank_int[3]) { |
2393 | rdev->pm.vblank_sync = true; | 2599 | drm_handle_vblank(rdev->ddev, 3); |
2394 | wake_up(&rdev->irq.vblank_queue); | 2600 | rdev->pm.vblank_sync = true; |
2395 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | 2601 | wake_up(&rdev->irq.vblank_queue); |
2602 | } | ||
2603 | if (rdev->irq.pflip[3]) | ||
2604 | radeon_crtc_handle_flip(rdev, 3); | ||
2605 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | ||
2396 | DRM_DEBUG("IH: D4 vblank\n"); | 2606 | DRM_DEBUG("IH: D4 vblank\n"); |
2397 | } | 2607 | } |
2398 | break; | 2608 | break; |
2399 | case 1: /* D4 vline */ | 2609 | case 1: /* D4 vline */ |
2400 | if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { | 2610 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { |
2401 | disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; | 2611 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; |
2402 | DRM_DEBUG("IH: D4 vline\n"); | 2612 | DRM_DEBUG("IH: D4 vline\n"); |
2403 | } | 2613 | } |
2404 | break; | 2614 | break; |
@@ -2410,17 +2620,21 @@ restart_ih: | |||
2410 | case 5: /* D5 vblank/vline */ | 2620 | case 5: /* D5 vblank/vline */ |
2411 | switch (src_data) { | 2621 | switch (src_data) { |
2412 | case 0: /* D5 vblank */ | 2622 | case 0: /* D5 vblank */ |
2413 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | 2623 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
2414 | drm_handle_vblank(rdev->ddev, 4); | 2624 | if (rdev->irq.crtc_vblank_int[4]) { |
2415 | rdev->pm.vblank_sync = true; | 2625 | drm_handle_vblank(rdev->ddev, 4); |
2416 | wake_up(&rdev->irq.vblank_queue); | 2626 | rdev->pm.vblank_sync = true; |
2417 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | 2627 | wake_up(&rdev->irq.vblank_queue); |
2628 | } | ||
2629 | if (rdev->irq.pflip[4]) | ||
2630 | radeon_crtc_handle_flip(rdev, 4); | ||
2631 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | ||
2418 | DRM_DEBUG("IH: D5 vblank\n"); | 2632 | DRM_DEBUG("IH: D5 vblank\n"); |
2419 | } | 2633 | } |
2420 | break; | 2634 | break; |
2421 | case 1: /* D5 vline */ | 2635 | case 1: /* D5 vline */ |
2422 | if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { | 2636 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { |
2423 | disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; | 2637 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; |
2424 | DRM_DEBUG("IH: D5 vline\n"); | 2638 | DRM_DEBUG("IH: D5 vline\n"); |
2425 | } | 2639 | } |
2426 | break; | 2640 | break; |
@@ -2432,17 +2646,21 @@ restart_ih: | |||
2432 | case 6: /* D6 vblank/vline */ | 2646 | case 6: /* D6 vblank/vline */ |
2433 | switch (src_data) { | 2647 | switch (src_data) { |
2434 | case 0: /* D6 vblank */ | 2648 | case 0: /* D6 vblank */ |
2435 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | 2649 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
2436 | drm_handle_vblank(rdev->ddev, 5); | 2650 | if (rdev->irq.crtc_vblank_int[5]) { |
2437 | rdev->pm.vblank_sync = true; | 2651 | drm_handle_vblank(rdev->ddev, 5); |
2438 | wake_up(&rdev->irq.vblank_queue); | 2652 | rdev->pm.vblank_sync = true; |
2439 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | 2653 | wake_up(&rdev->irq.vblank_queue); |
2654 | } | ||
2655 | if (rdev->irq.pflip[5]) | ||
2656 | radeon_crtc_handle_flip(rdev, 5); | ||
2657 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | ||
2440 | DRM_DEBUG("IH: D6 vblank\n"); | 2658 | DRM_DEBUG("IH: D6 vblank\n"); |
2441 | } | 2659 | } |
2442 | break; | 2660 | break; |
2443 | case 1: /* D6 vline */ | 2661 | case 1: /* D6 vline */ |
2444 | if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { | 2662 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { |
2445 | disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; | 2663 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; |
2446 | DRM_DEBUG("IH: D6 vline\n"); | 2664 | DRM_DEBUG("IH: D6 vline\n"); |
2447 | } | 2665 | } |
2448 | break; | 2666 | break; |
@@ -2454,43 +2672,43 @@ restart_ih: | |||
2454 | case 42: /* HPD hotplug */ | 2672 | case 42: /* HPD hotplug */ |
2455 | switch (src_data) { | 2673 | switch (src_data) { |
2456 | case 0: | 2674 | case 0: |
2457 | if (disp_int & DC_HPD1_INTERRUPT) { | 2675 | if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { |
2458 | disp_int &= ~DC_HPD1_INTERRUPT; | 2676 | rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; |
2459 | queue_hotplug = true; | 2677 | queue_hotplug = true; |
2460 | DRM_DEBUG("IH: HPD1\n"); | 2678 | DRM_DEBUG("IH: HPD1\n"); |
2461 | } | 2679 | } |
2462 | break; | 2680 | break; |
2463 | case 1: | 2681 | case 1: |
2464 | if (disp_int_cont & DC_HPD2_INTERRUPT) { | 2682 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
2465 | disp_int_cont &= ~DC_HPD2_INTERRUPT; | 2683 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; |
2466 | queue_hotplug = true; | 2684 | queue_hotplug = true; |
2467 | DRM_DEBUG("IH: HPD2\n"); | 2685 | DRM_DEBUG("IH: HPD2\n"); |
2468 | } | 2686 | } |
2469 | break; | 2687 | break; |
2470 | case 2: | 2688 | case 2: |
2471 | if (disp_int_cont2 & DC_HPD3_INTERRUPT) { | 2689 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
2472 | disp_int_cont2 &= ~DC_HPD3_INTERRUPT; | 2690 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; |
2473 | queue_hotplug = true; | 2691 | queue_hotplug = true; |
2474 | DRM_DEBUG("IH: HPD3\n"); | 2692 | DRM_DEBUG("IH: HPD3\n"); |
2475 | } | 2693 | } |
2476 | break; | 2694 | break; |
2477 | case 3: | 2695 | case 3: |
2478 | if (disp_int_cont3 & DC_HPD4_INTERRUPT) { | 2696 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
2479 | disp_int_cont3 &= ~DC_HPD4_INTERRUPT; | 2697 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; |
2480 | queue_hotplug = true; | 2698 | queue_hotplug = true; |
2481 | DRM_DEBUG("IH: HPD4\n"); | 2699 | DRM_DEBUG("IH: HPD4\n"); |
2482 | } | 2700 | } |
2483 | break; | 2701 | break; |
2484 | case 4: | 2702 | case 4: |
2485 | if (disp_int_cont4 & DC_HPD5_INTERRUPT) { | 2703 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
2486 | disp_int_cont4 &= ~DC_HPD5_INTERRUPT; | 2704 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; |
2487 | queue_hotplug = true; | 2705 | queue_hotplug = true; |
2488 | DRM_DEBUG("IH: HPD5\n"); | 2706 | DRM_DEBUG("IH: HPD5\n"); |
2489 | } | 2707 | } |
2490 | break; | 2708 | break; |
2491 | case 5: | 2709 | case 5: |
2492 | if (disp_int_cont5 & DC_HPD6_INTERRUPT) { | 2710 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
2493 | disp_int_cont5 &= ~DC_HPD6_INTERRUPT; | 2711 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; |
2494 | queue_hotplug = true; | 2712 | queue_hotplug = true; |
2495 | DRM_DEBUG("IH: HPD6\n"); | 2713 | DRM_DEBUG("IH: HPD6\n"); |
2496 | } | 2714 | } |
@@ -2666,12 +2884,16 @@ static bool evergreen_card_posted(struct radeon_device *rdev) | |||
2666 | u32 reg; | 2884 | u32 reg; |
2667 | 2885 | ||
2668 | /* first check CRTCs */ | 2886 | /* first check CRTCs */ |
2669 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | 2887 | if (rdev->flags & RADEON_IS_IGP) |
2670 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | | 2888 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
2671 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | 2889 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); |
2672 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | | 2890 | else |
2673 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | | 2891 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
2674 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | 2892 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | |
2893 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | ||
2894 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | | ||
2895 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | | ||
2896 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
2675 | if (reg & EVERGREEN_CRTC_MASTER_EN) | 2897 | if (reg & EVERGREEN_CRTC_MASTER_EN) |
2676 | return true; | 2898 | return true; |
2677 | 2899 | ||
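The posted check now ORs the master-enable bits of only the CRTCs the ASIC actually exposes: two display pipes on the Palm IGP versus six on discrete Evergreen, the same split guarded in the interrupt code above. An equivalent loop (sketch; crtc_offsets[] is a hypothetical table of the EVERGREEN_CRTCn_REGISTER_OFFSET values):

	int num_crtc = (rdev->flags & RADEON_IS_IGP) ? 2 : 6;
	u32 reg = 0;

	for (i = 0; i < num_crtc; i++)
		reg |= RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
	if (reg & EVERGREEN_CRTC_MASTER_EN)
		return true;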
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index e0e590110dd4..2ccd1f0545fe 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -147,7 +147,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
147 | radeon_ring_write(rdev, 0); | 147 | radeon_ring_write(rdev, 0); |
148 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); | 148 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); |
149 | 149 | ||
150 | if (rdev->family == CHIP_CEDAR) | 150 | if ((rdev->family == CHIP_CEDAR) || |
151 | (rdev->family == CHIP_PALM)) | ||
151 | cp_set_surface_sync(rdev, | 152 | cp_set_surface_sync(rdev, |
152 | PACKET3_TC_ACTION_ENA, 48, gpu_addr); | 153 | PACKET3_TC_ACTION_ENA, 48, gpu_addr); |
153 | else | 154 | else |
@@ -331,9 +332,31 @@ set_default_state(struct radeon_device *rdev) | |||
331 | num_hs_stack_entries = 85; | 332 | num_hs_stack_entries = 85; |
332 | num_ls_stack_entries = 85; | 333 | num_ls_stack_entries = 85; |
333 | break; | 334 | break; |
335 | case CHIP_PALM: | ||
336 | num_ps_gprs = 93; | ||
337 | num_vs_gprs = 46; | ||
338 | num_temp_gprs = 4; | ||
339 | num_gs_gprs = 31; | ||
340 | num_es_gprs = 31; | ||
341 | num_hs_gprs = 23; | ||
342 | num_ls_gprs = 23; | ||
343 | num_ps_threads = 96; | ||
344 | num_vs_threads = 16; | ||
345 | num_gs_threads = 16; | ||
346 | num_es_threads = 16; | ||
347 | num_hs_threads = 16; | ||
348 | num_ls_threads = 16; | ||
349 | num_ps_stack_entries = 42; | ||
350 | num_vs_stack_entries = 42; | ||
351 | num_gs_stack_entries = 42; | ||
352 | num_es_stack_entries = 42; | ||
353 | num_hs_stack_entries = 42; | ||
354 | num_ls_stack_entries = 42; | ||
355 | break; | ||
334 | } | 356 | } |
335 | 357 | ||
336 | if (rdev->family == CHIP_CEDAR) | 358 | if ((rdev->family == CHIP_CEDAR) || |
359 | (rdev->family == CHIP_PALM)) | ||
337 | sq_config = 0; | 360 | sq_config = 0; |
338 | else | 361 | else |
339 | sq_config = VC_ENABLE; | 362 | sq_config = VC_ENABLE; |
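Palm is routed down the existing Cedar paths in the blit setup, presumably because, like Cedar, it has no vertex cache: surface syncs use PACKET3_TC_ACTION_ENA only, and sq_config leaves VC_ENABLE clear. It also gets its own row of GPR/thread/stack counts in set_default_state(). A hypothetical predicate capturing the two call sites:

	/* Hypothetical helper; the patch open-codes this test twice. */
	static bool radeon_blit_no_vc(struct radeon_device *rdev)
	{
		return (rdev->family == CHIP_CEDAR) ||
		       (rdev->family == CHIP_PALM);
	}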
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 2330f3a36fd5..c781c92c3451 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
@@ -105,6 +105,11 @@ | |||
105 | #define EVERGREEN_GRPH_Y_START 0x6830 | 105 | #define EVERGREEN_GRPH_Y_START 0x6830 |
106 | #define EVERGREEN_GRPH_X_END 0x6834 | 106 | #define EVERGREEN_GRPH_X_END 0x6834 |
107 | #define EVERGREEN_GRPH_Y_END 0x6838 | 107 | #define EVERGREEN_GRPH_Y_END 0x6838 |
108 | #define EVERGREEN_GRPH_UPDATE 0x6844 | ||
109 | # define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) | ||
110 | # define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) | ||
111 | #define EVERGREEN_GRPH_FLIP_CONTROL 0x6848 | ||
112 | # define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) | ||
108 | 113 | ||
109 | /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ | 114 | /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ |
110 | #define EVERGREEN_CUR_CONTROL 0x6998 | 115 | #define EVERGREEN_CUR_CONTROL 0x6998 |
@@ -178,6 +183,7 @@ | |||
178 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) | 183 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) |
179 | #define EVERGREEN_CRTC_STATUS 0x6e8c | 184 | #define EVERGREEN_CRTC_STATUS 0x6e8c |
180 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 | 185 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 |
186 | #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 | ||
181 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 | 187 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 |
182 | 188 | ||
183 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 | 189 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 |
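The new definitions carry the flip machinery: EVERGREEN_GRPH_UPDATE_LOCK freezes the double-buffered surface registers while a new base address is written, EVERGREEN_GRPH_SURFACE_UPDATE_PENDING reports that the write has not yet latched, and clearing EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN makes the latch happen at vertical rather than horizontal retrace. An illustrative flip fragment using them (a sketch, not the patch's actual evergreen handler; radeon_crtc as in the r100 code later in this diff):

	tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset,
	       tmp | EVERGREEN_GRPH_UPDATE_LOCK);	/* freeze updates */

	/* ... write the new scanout base address here ... */

	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset,
	       tmp & ~EVERGREEN_GRPH_UPDATE_LOCK);	/* latch at vblank */

	pending = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
		  EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;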
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 113c70cc8b39..5b869ce86917 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -164,11 +164,13 @@ | |||
164 | #define SE_SC_BUSY (1 << 29) | 164 | #define SE_SC_BUSY (1 << 29) |
165 | #define SE_DB_BUSY (1 << 30) | 165 | #define SE_DB_BUSY (1 << 30) |
166 | #define SE_CB_BUSY (1 << 31) | 166 | #define SE_CB_BUSY (1 << 31) |
167 | 167 | /* evergreen */ | |
168 | #define CG_MULT_THERMAL_STATUS 0x740 | 168 | #define CG_MULT_THERMAL_STATUS 0x740 |
169 | #define ASIC_T(x) ((x) << 16) | 169 | #define ASIC_T(x) ((x) << 16) |
170 | #define ASIC_T_MASK 0x7FF0000 | 170 | #define ASIC_T_MASK 0x7FF0000 |
171 | #define ASIC_T_SHIFT 16 | 171 | #define ASIC_T_SHIFT 16 |
172 | /* APU */ | ||
173 | #define CG_THERMAL_STATUS 0x678 | ||
172 | 174 | ||
173 | #define HDP_HOST_PATH_CNTL 0x2C00 | 175 | #define HDP_HOST_PATH_CNTL 0x2C00 |
174 | #define HDP_NONSURFACE_BASE 0x2C04 | 176 | #define HDP_NONSURFACE_BASE 0x2C04 |
@@ -180,6 +182,7 @@ | |||
180 | #define MC_SHARED_CHMAP 0x2004 | 182 | #define MC_SHARED_CHMAP 0x2004 |
181 | #define NOOFCHAN_SHIFT 12 | 183 | #define NOOFCHAN_SHIFT 12 |
182 | #define NOOFCHAN_MASK 0x00003000 | 184 | #define NOOFCHAN_MASK 0x00003000 |
185 | #define MC_SHARED_CHREMAP 0x2008 | ||
183 | 186 | ||
184 | #define MC_ARB_RAMCFG 0x2760 | 187 | #define MC_ARB_RAMCFG 0x2760 |
185 | #define NOOFBANK_SHIFT 0 | 188 | #define NOOFBANK_SHIFT 0 |
@@ -199,6 +202,7 @@ | |||
199 | #define MC_VM_AGP_BOT 0x202C | 202 | #define MC_VM_AGP_BOT 0x202C |
200 | #define MC_VM_AGP_BASE 0x2030 | 203 | #define MC_VM_AGP_BASE 0x2030 |
201 | #define MC_VM_FB_LOCATION 0x2024 | 204 | #define MC_VM_FB_LOCATION 0x2024 |
205 | #define MC_FUS_VM_FB_OFFSET 0x2898 | ||
202 | #define MC_VM_MB_L1_TLB0_CNTL 0x2234 | 206 | #define MC_VM_MB_L1_TLB0_CNTL 0x2234 |
203 | #define MC_VM_MB_L1_TLB1_CNTL 0x2238 | 207 | #define MC_VM_MB_L1_TLB1_CNTL 0x2238 |
204 | #define MC_VM_MB_L1_TLB2_CNTL 0x223C | 208 | #define MC_VM_MB_L1_TLB2_CNTL 0x223C |
@@ -348,6 +352,9 @@ | |||
348 | #define SYNC_WALKER (1 << 25) | 352 | #define SYNC_WALKER (1 << 25) |
349 | #define SYNC_ALIGNER (1 << 26) | 353 | #define SYNC_ALIGNER (1 << 26) |
350 | 354 | ||
355 | #define TCP_CHAN_STEER_LO 0x960c | ||
356 | #define TCP_CHAN_STEER_HI 0x9610 | ||
357 | |||
351 | #define VGT_CACHE_INVALIDATION 0x88C4 | 358 | #define VGT_CACHE_INVALIDATION 0x88C4 |
352 | #define CACHE_INVALIDATION(x) ((x) << 0) | 359 | #define CACHE_INVALIDATION(x) ((x) << 0) |
353 | #define VC_ONLY 0 | 360 | #define VC_ONLY 0 |
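CG_THERMAL_STATUS at 0x678 gives the APU a thermal readout in place of the discrete parts' CG_MULT_THERMAL_STATUS. A sketch of the two reads; the APU field layout (low byte) is an assumption here, since only the discrete ASIC_T field is defined above:

	if (rdev->flags & RADEON_IS_IGP)
		temp = RREG32(CG_THERMAL_STATUS) & 0xff;	/* assumed layout */
	else
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK)
			>> ASIC_T_SHIFT;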
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 8e10aa9f74b0..300b4a64d8fe 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
69 | */ | 69 | */ |
70 | 70 | ||
71 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) | ||
72 | { | ||
73 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
74 | u32 tmp; | ||
75 | |||
76 | /* make sure flip is at vb rather than hb */ | ||
77 | tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); | ||
78 | tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; | ||
79 | /* make sure pending bit is asserted */ | ||
80 | tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; | ||
81 | WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); | ||
82 | |||
83 | /* set pageflip to happen as late as possible in the vblank interval. | ||
84 | * same field for crtc1/2 | ||
85 | */ | ||
86 | tmp = RREG32(RADEON_CRTC_GEN_CNTL); | ||
87 | tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK; | ||
88 | WREG32(RADEON_CRTC_GEN_CNTL, tmp); | ||
89 | |||
90 | /* enable the pflip int */ | ||
91 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | ||
92 | } | ||
93 | |||
94 | void r100_post_page_flip(struct radeon_device *rdev, int crtc) | ||
95 | { | ||
96 | /* disable the pflip int */ | ||
97 | radeon_irq_kms_pflip_irq_put(rdev, crtc); | ||
98 | } | ||
99 | |||
100 | u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
101 | { | ||
102 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
103 | u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; | ||
104 | |||
105 | /* Lock the graphics update lock */ | ||
106 | /* update the scanout addresses */ | ||
107 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); | ||
108 | |||
109 | /* Wait for update_pending to go high. */ | ||
110 | while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); | ||
111 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
112 | |||
113 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
114 | tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; | ||
115 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); | ||
116 | |||
117 | /* Return current update_pending status: */ | ||
118 | return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; | ||
119 | } | ||
120 | |||
71 | void r100_pm_get_dynpm_state(struct radeon_device *rdev) | 121 | void r100_pm_get_dynpm_state(struct radeon_device *rdev) |
72 | { | 122 | { |
73 | int i; | 123 | int i; |
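The three r100 helpers above split a flip into arm, program, and disarm phases. A caller-side sketch (hypothetical; the real sequencing lives in the display code that calls radeon_crtc_handle_flip, seen earlier in this diff):

	r100_pre_page_flip(rdev, crtc_id);	/* vblank latching, pflip irq on */
	/* ... once the new framebuffer is pinned: */
	pending = r100_page_flip(rdev, crtc_id, new_crtc_base);
	/* ... after the flip interrupt reports completion: */
	r100_post_page_flip(rdev, crtc_id);	/* pflip irq off */

Note the unbounded busy-wait on RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET inside r100_page_flip(); a bounded retry would be safer if the trigger ever fails to assert.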
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev) | |||
526 | if (rdev->irq.gui_idle) { | 576 | if (rdev->irq.gui_idle) { |
527 | tmp |= RADEON_GUI_IDLE_MASK; | 577 | tmp |= RADEON_GUI_IDLE_MASK; |
528 | } | 578 | } |
529 | if (rdev->irq.crtc_vblank_int[0]) { | 579 | if (rdev->irq.crtc_vblank_int[0] || |
580 | rdev->irq.pflip[0]) { | ||
530 | tmp |= RADEON_CRTC_VBLANK_MASK; | 581 | tmp |= RADEON_CRTC_VBLANK_MASK; |
531 | } | 582 | } |
532 | if (rdev->irq.crtc_vblank_int[1]) { | 583 | if (rdev->irq.crtc_vblank_int[1] || |
584 | rdev->irq.pflip[1]) { | ||
533 | tmp |= RADEON_CRTC2_VBLANK_MASK; | 585 | tmp |= RADEON_CRTC2_VBLANK_MASK; |
534 | } | 586 | } |
535 | if (rdev->irq.hpd[0]) { | 587 | if (rdev->irq.hpd[0]) { |
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev) | |||
600 | } | 652 | } |
601 | /* Vertical blank interrupts */ | 653 | /* Vertical blank interrupts */ |
602 | if (status & RADEON_CRTC_VBLANK_STAT) { | 654 | if (status & RADEON_CRTC_VBLANK_STAT) { |
603 | drm_handle_vblank(rdev->ddev, 0); | 655 | if (rdev->irq.crtc_vblank_int[0]) { |
604 | rdev->pm.vblank_sync = true; | 656 | drm_handle_vblank(rdev->ddev, 0); |
605 | wake_up(&rdev->irq.vblank_queue); | 657 | rdev->pm.vblank_sync = true; |
658 | wake_up(&rdev->irq.vblank_queue); | ||
659 | } | ||
660 | if (rdev->irq.pflip[0]) | ||
661 | radeon_crtc_handle_flip(rdev, 0); | ||
606 | } | 662 | } |
607 | if (status & RADEON_CRTC2_VBLANK_STAT) { | 663 | if (status & RADEON_CRTC2_VBLANK_STAT) { |
608 | drm_handle_vblank(rdev->ddev, 1); | 664 | if (rdev->irq.crtc_vblank_int[1]) { |
609 | rdev->pm.vblank_sync = true; | 665 | drm_handle_vblank(rdev->ddev, 1); |
610 | wake_up(&rdev->irq.vblank_queue); | 666 | rdev->pm.vblank_sync = true; |
667 | wake_up(&rdev->irq.vblank_queue); | ||
668 | } | ||
669 | if (rdev->irq.pflip[1]) | ||
670 | radeon_crtc_handle_flip(rdev, 1); | ||
611 | } | 671 | } |
612 | if (status & RADEON_FP_DETECT_STAT) { | 672 | if (status & RADEON_FP_DETECT_STAT) { |
613 | queue_hotplug = true; | 673 | queue_hotplug = true; |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 6ac1f604e29b..fc437059918f 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -355,6 +355,8 @@ | |||
355 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 | 355 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 |
356 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 | 356 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
357 | 357 | ||
358 | #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 | ||
359 | |||
358 | /* master controls */ | 360 | /* master controls */ |
359 | #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 | 361 | #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 |
360 | #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc | 362 | #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc |
@@ -409,8 +411,10 @@ | |||
409 | #define AVIVO_D1GRPH_X_END 0x6134 | 411 | #define AVIVO_D1GRPH_X_END 0x6134 |
410 | #define AVIVO_D1GRPH_Y_END 0x6138 | 412 | #define AVIVO_D1GRPH_Y_END 0x6138 |
411 | #define AVIVO_D1GRPH_UPDATE 0x6144 | 413 | #define AVIVO_D1GRPH_UPDATE 0x6144 |
414 | # define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2) | ||
412 | # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) | 415 | # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) |
413 | #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 | 416 | #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 |
417 | # define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) | ||
414 | 418 | ||
415 | #define AVIVO_D1CUR_CONTROL 0x6400 | 419 | #define AVIVO_D1CUR_CONTROL 0x6400 |
416 | # define AVIVO_D1CURSOR_EN (1 << 0) | 420 | # define AVIVO_D1CURSOR_EN (1 << 0) |
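These AVIVO definitions are the r500-generation counterparts of the Evergreen flip bits above, driving the same lock/latch scheme on older ASICs. Completion polling, for example, reduces to (illustrative fragment):

	tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	flip_pending = !!(tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);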
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a322d4f647bd..c6a37e036f11 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -83,6 +83,9 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); | |||
83 | MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); | 83 | MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); |
84 | MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); | 84 | MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); |
85 | MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); | 85 | MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); |
86 | MODULE_FIRMWARE("radeon/PALM_pfp.bin"); | ||
87 | MODULE_FIRMWARE("radeon/PALM_me.bin"); | ||
88 | MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); | ||
86 | 89 | ||
87 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 90 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
88 | 91 | ||
@@ -1161,7 +1164,7 @@ static void r600_mc_program(struct radeon_device *rdev) | |||
1161 | * Note: GTT start, end, size should be initialized before calling this | 1164 | * Note: GTT start, end, size should be initialized before calling this |
1162 | * function on AGP platform. | 1165 | * function on AGP platform. |
1163 | */ | 1166 | */ |
1164 | void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | 1167 | static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
1165 | { | 1168 | { |
1166 | u64 size_bf, size_af; | 1169 | u64 size_bf, size_af; |
1167 | 1170 | ||
@@ -2000,6 +2003,10 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
2000 | chip_name = "CYPRESS"; | 2003 | chip_name = "CYPRESS"; |
2001 | rlc_chip_name = "CYPRESS"; | 2004 | rlc_chip_name = "CYPRESS"; |
2002 | break; | 2005 | break; |
2006 | case CHIP_PALM: | ||
2007 | chip_name = "PALM"; | ||
2008 | rlc_chip_name = "SUMO"; | ||
2009 | break; | ||
2003 | default: BUG(); | 2010 | default: BUG(); |
2004 | } | 2011 | } |
2005 | 2012 | ||
@@ -2865,6 +2872,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) | |||
2865 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | 2872 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
2866 | WREG32(GRBM_INT_CNTL, 0); | 2873 | WREG32(GRBM_INT_CNTL, 0); |
2867 | WREG32(DxMODE_INT_MASK, 0); | 2874 | WREG32(DxMODE_INT_MASK, 0); |
2875 | WREG32(D1GRPH_INTERRUPT_CONTROL, 0); | ||
2876 | WREG32(D2GRPH_INTERRUPT_CONTROL, 0); | ||
2868 | if (ASIC_IS_DCE3(rdev)) { | 2877 | if (ASIC_IS_DCE3(rdev)) { |
2869 | WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); | 2878 | WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); |
2870 | WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); | 2879 | WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); |
@@ -2989,6 +2998,7 @@ int r600_irq_set(struct radeon_device *rdev) | |||
2989 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 2998 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
2990 | u32 grbm_int_cntl = 0; | 2999 | u32 grbm_int_cntl = 0; |
2991 | u32 hdmi1, hdmi2; | 3000 | u32 hdmi1, hdmi2; |
3001 | u32 d1grph = 0, d2grph = 0; | ||
2992 | 3002 | ||
2993 | if (!rdev->irq.installed) { | 3003 | if (!rdev->irq.installed) { |
2994 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); | 3004 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
@@ -3025,11 +3035,13 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3025 | cp_int_cntl |= RB_INT_ENABLE; | 3035 | cp_int_cntl |= RB_INT_ENABLE; |
3026 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | 3036 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; |
3027 | } | 3037 | } |
3028 | if (rdev->irq.crtc_vblank_int[0]) { | 3038 | if (rdev->irq.crtc_vblank_int[0] || |
3039 | rdev->irq.pflip[0]) { | ||
3029 | DRM_DEBUG("r600_irq_set: vblank 0\n"); | 3040 | DRM_DEBUG("r600_irq_set: vblank 0\n"); |
3030 | mode_int |= D1MODE_VBLANK_INT_MASK; | 3041 | mode_int |= D1MODE_VBLANK_INT_MASK; |
3031 | } | 3042 | } |
3032 | if (rdev->irq.crtc_vblank_int[1]) { | 3043 | if (rdev->irq.crtc_vblank_int[1] || |
3044 | rdev->irq.pflip[1]) { | ||
3033 | DRM_DEBUG("r600_irq_set: vblank 1\n"); | 3045 | DRM_DEBUG("r600_irq_set: vblank 1\n"); |
3034 | mode_int |= D2MODE_VBLANK_INT_MASK; | 3046 | mode_int |= D2MODE_VBLANK_INT_MASK; |
3035 | } | 3047 | } |
@@ -3072,6 +3084,8 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3072 | 3084 | ||
3073 | WREG32(CP_INT_CNTL, cp_int_cntl); | 3085 | WREG32(CP_INT_CNTL, cp_int_cntl); |
3074 | WREG32(DxMODE_INT_MASK, mode_int); | 3086 | WREG32(DxMODE_INT_MASK, mode_int); |
3087 | WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); | ||
3088 | WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); | ||
3075 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | 3089 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
3076 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); | 3090 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); |
3077 | if (ASIC_IS_DCE3(rdev)) { | 3091 | if (ASIC_IS_DCE3(rdev)) { |
@@ -3094,32 +3108,35 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3094 | return 0; | 3108 | return 0; |
3095 | } | 3109 | } |
3096 | 3110 | ||
3097 | static inline void r600_irq_ack(struct radeon_device *rdev, | 3111 | static inline void r600_irq_ack(struct radeon_device *rdev) |
3098 | u32 *disp_int, | ||
3099 | u32 *disp_int_cont, | ||
3100 | u32 *disp_int_cont2) | ||
3101 | { | 3112 | { |
3102 | u32 tmp; | 3113 | u32 tmp; |
3103 | 3114 | ||
3104 | if (ASIC_IS_DCE3(rdev)) { | 3115 | if (ASIC_IS_DCE3(rdev)) { |
3105 | *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); | 3116 | rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); |
3106 | *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); | 3117 | rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); |
3107 | *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); | 3118 | rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); |
3108 | } else { | 3119 | } else { |
3109 | *disp_int = RREG32(DISP_INTERRUPT_STATUS); | 3120 | rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); |
3110 | *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | 3121 | rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); |
3111 | *disp_int_cont2 = 0; | 3122 | rdev->irq.stat_regs.r600.disp_int_cont2 = 0; |
3112 | } | 3123 | } |
3113 | 3124 | rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); | |
3114 | if (*disp_int & LB_D1_VBLANK_INTERRUPT) | 3125 | rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); |
3126 | |||
3127 | if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) | ||
3128 | WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); | ||
3129 | if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) | ||
3130 | WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); | ||
3131 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) | ||
3115 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | 3132 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); |
3116 | if (*disp_int & LB_D1_VLINE_INTERRUPT) | 3133 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) |
3117 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | 3134 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); |
3118 | if (*disp_int & LB_D2_VBLANK_INTERRUPT) | 3135 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) |
3119 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | 3136 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); |
3120 | if (*disp_int & LB_D2_VLINE_INTERRUPT) | 3137 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) |
3121 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | 3138 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); |
3122 | if (*disp_int & DC_HPD1_INTERRUPT) { | 3139 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { |
3123 | if (ASIC_IS_DCE3(rdev)) { | 3140 | if (ASIC_IS_DCE3(rdev)) { |
3124 | tmp = RREG32(DC_HPD1_INT_CONTROL); | 3141 | tmp = RREG32(DC_HPD1_INT_CONTROL); |
3125 | tmp |= DC_HPDx_INT_ACK; | 3142 | tmp |= DC_HPDx_INT_ACK; |
@@ -3130,7 +3147,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3130 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | 3147 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
3131 | } | 3148 | } |
3132 | } | 3149 | } |
3133 | if (*disp_int & DC_HPD2_INTERRUPT) { | 3150 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { |
3134 | if (ASIC_IS_DCE3(rdev)) { | 3151 | if (ASIC_IS_DCE3(rdev)) { |
3135 | tmp = RREG32(DC_HPD2_INT_CONTROL); | 3152 | tmp = RREG32(DC_HPD2_INT_CONTROL); |
3136 | tmp |= DC_HPDx_INT_ACK; | 3153 | tmp |= DC_HPDx_INT_ACK; |
@@ -3141,7 +3158,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3141 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | 3158 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
3142 | } | 3159 | } |
3143 | } | 3160 | } |
3144 | if (*disp_int_cont & DC_HPD3_INTERRUPT) { | 3161 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { |
3145 | if (ASIC_IS_DCE3(rdev)) { | 3162 | if (ASIC_IS_DCE3(rdev)) { |
3146 | tmp = RREG32(DC_HPD3_INT_CONTROL); | 3163 | tmp = RREG32(DC_HPD3_INT_CONTROL); |
3147 | tmp |= DC_HPDx_INT_ACK; | 3164 | tmp |= DC_HPDx_INT_ACK; |
@@ -3152,18 +3169,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3152 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | 3169 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); |
3153 | } | 3170 | } |
3154 | } | 3171 | } |
3155 | if (*disp_int_cont & DC_HPD4_INTERRUPT) { | 3172 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { |
3156 | tmp = RREG32(DC_HPD4_INT_CONTROL); | 3173 | tmp = RREG32(DC_HPD4_INT_CONTROL); |
3157 | tmp |= DC_HPDx_INT_ACK; | 3174 | tmp |= DC_HPDx_INT_ACK; |
3158 | WREG32(DC_HPD4_INT_CONTROL, tmp); | 3175 | WREG32(DC_HPD4_INT_CONTROL, tmp); |
3159 | } | 3176 | } |
3160 | if (ASIC_IS_DCE32(rdev)) { | 3177 | if (ASIC_IS_DCE32(rdev)) { |
3161 | if (*disp_int_cont2 & DC_HPD5_INTERRUPT) { | 3178 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { |
3162 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 3179 | tmp = RREG32(DC_HPD5_INT_CONTROL); |
3163 | tmp |= DC_HPDx_INT_ACK; | 3180 | tmp |= DC_HPDx_INT_ACK; |
3164 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 3181 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
3165 | } | 3182 | } |
3166 | if (*disp_int_cont2 & DC_HPD6_INTERRUPT) { | 3183 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { |
3167 | tmp = RREG32(DC_HPD6_INT_CONTROL); | 3184 | tmp = RREG32(DC_HPD6_INT_CONTROL); |
3168 | tmp |= DC_HPDx_INT_ACK; | 3185 | tmp |= DC_HPDx_INT_ACK; |
3169 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 3186 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
@@ -3185,12 +3202,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3185 | 3202 | ||
3186 | void r600_irq_disable(struct radeon_device *rdev) | 3203 | void r600_irq_disable(struct radeon_device *rdev) |
3187 | { | 3204 | { |
3188 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
3189 | |||
3190 | r600_disable_interrupts(rdev); | 3205 | r600_disable_interrupts(rdev); |
3191 | /* Wait and acknowledge irq */ | 3206 | /* Wait and acknowledge irq */ |
3192 | mdelay(1); | 3207 | mdelay(1); |
3193 | r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); | 3208 | r600_irq_ack(rdev); |
3194 | r600_disable_interrupt_state(rdev); | 3209 | r600_disable_interrupt_state(rdev); |
3195 | } | 3210 | } |
3196 | 3211 | ||
@@ -3253,7 +3268,7 @@ int r600_irq_process(struct radeon_device *rdev) | |||
3253 | u32 wptr = r600_get_ih_wptr(rdev); | 3268 | u32 wptr = r600_get_ih_wptr(rdev); |
3254 | u32 rptr = rdev->ih.rptr; | 3269 | u32 rptr = rdev->ih.rptr; |
3255 | u32 src_id, src_data; | 3270 | u32 src_id, src_data; |
3256 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; | 3271 | u32 ring_index; |
3257 | unsigned long flags; | 3272 | unsigned long flags; |
3258 | bool queue_hotplug = false; | 3273 | bool queue_hotplug = false; |
3259 | 3274 | ||
@@ -3274,7 +3289,7 @@ int r600_irq_process(struct radeon_device *rdev) | |||
3274 | 3289 | ||
3275 | restart_ih: | 3290 | restart_ih: |
3276 | /* display interrupts */ | 3291 | /* display interrupts */ |
3277 | r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); | 3292 | r600_irq_ack(rdev); |
3278 | 3293 | ||
3279 | rdev->ih.wptr = wptr; | 3294 | rdev->ih.wptr = wptr; |
3280 | while (rptr != wptr) { | 3295 | while (rptr != wptr) { |
@@ -3287,17 +3302,21 @@ restart_ih: | |||
3287 | case 1: /* D1 vblank/vline */ | 3302 | case 1: /* D1 vblank/vline */ |
3288 | switch (src_data) { | 3303 | switch (src_data) { |
3289 | case 0: /* D1 vblank */ | 3304 | case 0: /* D1 vblank */ |
3290 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 3305 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { |
3291 | drm_handle_vblank(rdev->ddev, 0); | 3306 | if (rdev->irq.crtc_vblank_int[0]) { |
3292 | rdev->pm.vblank_sync = true; | 3307 | drm_handle_vblank(rdev->ddev, 0); |
3293 | wake_up(&rdev->irq.vblank_queue); | 3308 | rdev->pm.vblank_sync = true; |
3294 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 3309 | wake_up(&rdev->irq.vblank_queue); |
3310 | } | ||
3311 | if (rdev->irq.pflip[0]) | ||
3312 | radeon_crtc_handle_flip(rdev, 0); | ||
3313 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
3295 | DRM_DEBUG("IH: D1 vblank\n"); | 3314 | DRM_DEBUG("IH: D1 vblank\n"); |
3296 | } | 3315 | } |
3297 | break; | 3316 | break; |
3298 | case 1: /* D1 vline */ | 3317 | case 1: /* D1 vline */ |
3299 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | 3318 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { |
3300 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | 3319 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
3301 | DRM_DEBUG("IH: D1 vline\n"); | 3320 | DRM_DEBUG("IH: D1 vline\n"); |
3302 | } | 3321 | } |
3303 | break; | 3322 | break; |
@@ -3309,17 +3328,21 @@ restart_ih: | |||
3309 | case 5: /* D2 vblank/vline */ | 3328 | case 5: /* D2 vblank/vline */ |
3310 | switch (src_data) { | 3329 | switch (src_data) { |
3311 | case 0: /* D2 vblank */ | 3330 | case 0: /* D2 vblank */ |
3312 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { | 3331 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { |
3313 | drm_handle_vblank(rdev->ddev, 1); | 3332 | if (rdev->irq.crtc_vblank_int[1]) { |
3314 | rdev->pm.vblank_sync = true; | 3333 | drm_handle_vblank(rdev->ddev, 1); |
3315 | wake_up(&rdev->irq.vblank_queue); | 3334 | rdev->pm.vblank_sync = true; |
3316 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; | 3335 | wake_up(&rdev->irq.vblank_queue); |
3336 | } | ||
3337 | if (rdev->irq.pflip[1]) | ||
3338 | radeon_crtc_handle_flip(rdev, 1); | ||
3339 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; | ||
3317 | DRM_DEBUG("IH: D2 vblank\n"); | 3340 | DRM_DEBUG("IH: D2 vblank\n"); |
3318 | } | 3341 | } |
3319 | break; | 3342 | break; |
3320 | case 1: /* D2 vline */ | 3343 | case 1: /* D2 vline */ |
3321 | if (disp_int & LB_D2_VLINE_INTERRUPT) { | 3344 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { |
3322 | disp_int &= ~LB_D2_VLINE_INTERRUPT; | 3345 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; |
3323 | DRM_DEBUG("IH: D2 vline\n"); | 3346 | DRM_DEBUG("IH: D2 vline\n"); |
3324 | } | 3347 | } |
3325 | break; | 3348 | break; |
@@ -3331,43 +3354,43 @@ restart_ih: | |||
3331 | case 19: /* HPD/DAC hotplug */ | 3354 | case 19: /* HPD/DAC hotplug */ |
3332 | switch (src_data) { | 3355 | switch (src_data) { |
3333 | case 0: | 3356 | case 0: |
3334 | if (disp_int & DC_HPD1_INTERRUPT) { | 3357 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { |
3335 | disp_int &= ~DC_HPD1_INTERRUPT; | 3358 | rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; |
3336 | queue_hotplug = true; | 3359 | queue_hotplug = true; |
3337 | DRM_DEBUG("IH: HPD1\n"); | 3360 | DRM_DEBUG("IH: HPD1\n"); |
3338 | } | 3361 | } |
3339 | break; | 3362 | break; |
3340 | case 1: | 3363 | case 1: |
3341 | if (disp_int & DC_HPD2_INTERRUPT) { | 3364 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { |
3342 | disp_int &= ~DC_HPD2_INTERRUPT; | 3365 | rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; |
3343 | queue_hotplug = true; | 3366 | queue_hotplug = true; |
3344 | DRM_DEBUG("IH: HPD2\n"); | 3367 | DRM_DEBUG("IH: HPD2\n"); |
3345 | } | 3368 | } |
3346 | break; | 3369 | break; |
3347 | case 4: | 3370 | case 4: |
3348 | if (disp_int_cont & DC_HPD3_INTERRUPT) { | 3371 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { |
3349 | disp_int_cont &= ~DC_HPD3_INTERRUPT; | 3372 | rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; |
3350 | queue_hotplug = true; | 3373 | queue_hotplug = true; |
3351 | DRM_DEBUG("IH: HPD3\n"); | 3374 | DRM_DEBUG("IH: HPD3\n"); |
3352 | } | 3375 | } |
3353 | break; | 3376 | break; |
3354 | case 5: | 3377 | case 5: |
3355 | if (disp_int_cont & DC_HPD4_INTERRUPT) { | 3378 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { |
3356 | disp_int_cont &= ~DC_HPD4_INTERRUPT; | 3379 | rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; |
3357 | queue_hotplug = true; | 3380 | queue_hotplug = true; |
3358 | DRM_DEBUG("IH: HPD4\n"); | 3381 | DRM_DEBUG("IH: HPD4\n"); |
3359 | } | 3382 | } |
3360 | break; | 3383 | break; |
3361 | case 10: | 3384 | case 10: |
3362 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { | 3385 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { |
3363 | disp_int_cont2 &= ~DC_HPD5_INTERRUPT; | 3386 | rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; |
3364 | queue_hotplug = true; | 3387 | queue_hotplug = true; |
3365 | DRM_DEBUG("IH: HPD5\n"); | 3388 | DRM_DEBUG("IH: HPD5\n"); |
3366 | } | 3389 | } |
3367 | break; | 3390 | break; |
3368 | case 12: | 3391 | case 12: |
3369 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { | 3392 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { |
3370 | disp_int_cont2 &= ~DC_HPD6_INTERRUPT; | 3393 | rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; |
3371 | queue_hotplug = true; | 3394 | queue_hotplug = true; |
3372 | DRM_DEBUG("IH: HPD6\n"); | 3395 | DRM_DEBUG("IH: HPD6\n"); |
3373 | } | 3396 | } |
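Editor's note: the r600.c hunks above change r600_irq_ack() from threading the display-interrupt status through out-parameters to caching it in the new rdev->irq.stat_regs.r600 fields (declared in the radeon.h hunk further down). A minimal sketch of the resulting ack pattern, assuming the DISP_INTERRUPT_STATUS* register names from r600d.h; the helper name below is hypothetical:

	/* Sketch only: cache the status registers on the device so both
	 * r600_irq_disable() and r600_irq_process() can consume them
	 * without out-parameters at every call site. */
	static void r600_irq_ack_sketch(struct radeon_device *rdev)
	{
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
		/* the real helper presumably also latches d1grph_int/d2grph_int
		 * and writes back clear bits for whatever was found set */
	}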
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index bff4dc4f410f..c89cfa8e0c05 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -728,6 +728,15 @@ | |||
728 | /* DCE 3.2 */ | 728 | /* DCE 3.2 */ |
729 | # define DC_HPDx_EN (1 << 28) | 729 | # define DC_HPDx_EN (1 << 28) |
730 | 730 | ||
731 | #define D1GRPH_INTERRUPT_STATUS 0x6158 | ||
732 | #define D2GRPH_INTERRUPT_STATUS 0x6958 | ||
733 | # define DxGRPH_PFLIP_INT_OCCURRED (1 << 0) | ||
734 | # define DxGRPH_PFLIP_INT_CLEAR (1 << 8) | ||
735 | #define D1GRPH_INTERRUPT_CONTROL 0x615c | ||
736 | #define D2GRPH_INTERRUPT_CONTROL 0x695c | ||
737 | # define DxGRPH_PFLIP_INT_MASK (1 << 0) | ||
738 | # define DxGRPH_PFLIP_INT_TYPE (1 << 8) | ||
739 | |||
731 | /* | 740 | /* |
732 | * PM4 | 741 | * PM4 |
733 | */ | 742 | */ |
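Editor's note: the new DxGRPH defines gate the per-CRTC pageflip interrupt. A hedged sketch of how they would typically be used for CRTC 0; the exact call sites are an assumption (they likely live in the pre/post_page_flip hooks added elsewhere in this series), and the function name is hypothetical:

	/* Sketch only: unmask, then later acknowledge, the D1 pageflip
	 * interrupt using the defines above. */
	static void d1grph_pflip_sketch(struct radeon_device *rdev)
	{
		/* enable: set the mask bit in the control register */
		WREG32(D1GRPH_INTERRUPT_CONTROL,
		       RREG32(D1GRPH_INTERRUPT_CONTROL) | DxGRPH_PFLIP_INT_MASK);

		/* ack, from the IRQ path: write the clear bit if it fired */
		if (RREG32(D1GRPH_INTERRUPT_STATUS) & DxGRPH_PFLIP_INT_OCCURRED)
			WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	}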
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3a7095743d44..431d4186ddf0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -69,6 +69,7 @@ | |||
69 | #include <ttm/ttm_bo_driver.h> | 69 | #include <ttm/ttm_bo_driver.h> |
70 | #include <ttm/ttm_placement.h> | 70 | #include <ttm/ttm_placement.h> |
71 | #include <ttm/ttm_module.h> | 71 | #include <ttm/ttm_module.h> |
72 | #include <ttm/ttm_execbuf_util.h> | ||
72 | 73 | ||
73 | #include "radeon_family.h" | 74 | #include "radeon_family.h" |
74 | #include "radeon_mode.h" | 75 | #include "radeon_mode.h" |
@@ -180,6 +181,7 @@ void rs690_pm_info(struct radeon_device *rdev); | |||
180 | extern u32 rv6xx_get_temp(struct radeon_device *rdev); | 181 | extern u32 rv6xx_get_temp(struct radeon_device *rdev); |
181 | extern u32 rv770_get_temp(struct radeon_device *rdev); | 182 | extern u32 rv770_get_temp(struct radeon_device *rdev); |
182 | extern u32 evergreen_get_temp(struct radeon_device *rdev); | 183 | extern u32 evergreen_get_temp(struct radeon_device *rdev); |
184 | extern u32 sumo_get_temp(struct radeon_device *rdev); | ||
183 | 185 | ||
184 | /* | 186 | /* |
185 | * Fences. | 187 | * Fences. |
@@ -259,13 +261,12 @@ struct radeon_bo { | |||
259 | }; | 261 | }; |
260 | 262 | ||
261 | struct radeon_bo_list { | 263 | struct radeon_bo_list { |
262 | struct list_head list; | 264 | struct ttm_validate_buffer tv; |
263 | struct radeon_bo *bo; | 265 | struct radeon_bo *bo; |
264 | uint64_t gpu_offset; | 266 | uint64_t gpu_offset; |
265 | unsigned rdomain; | 267 | unsigned rdomain; |
266 | unsigned wdomain; | 268 | unsigned wdomain; |
267 | u32 tiling_flags; | 269 | u32 tiling_flags; |
268 | bool reserved; | ||
269 | }; | 270 | }; |
270 | 271 | ||
271 | /* | 272 | /* |
@@ -377,11 +378,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); | |||
377 | /* | 378 | /* |
378 | * IRQS. | 379 | * IRQS. |
379 | */ | 380 | */ |
381 | |||
382 | struct radeon_unpin_work { | ||
383 | struct work_struct work; | ||
384 | struct radeon_device *rdev; | ||
385 | int crtc_id; | ||
386 | struct radeon_fence *fence; | ||
387 | struct drm_pending_vblank_event *event; | ||
388 | struct radeon_bo *old_rbo; | ||
389 | u64 new_crtc_base; | ||
390 | }; | ||
391 | |||
392 | struct r500_irq_stat_regs { | ||
393 | u32 disp_int; | ||
394 | }; | ||
395 | |||
396 | struct r600_irq_stat_regs { | ||
397 | u32 disp_int; | ||
398 | u32 disp_int_cont; | ||
399 | u32 disp_int_cont2; | ||
400 | u32 d1grph_int; | ||
401 | u32 d2grph_int; | ||
402 | }; | ||
403 | |||
404 | struct evergreen_irq_stat_regs { | ||
405 | u32 disp_int; | ||
406 | u32 disp_int_cont; | ||
407 | u32 disp_int_cont2; | ||
408 | u32 disp_int_cont3; | ||
409 | u32 disp_int_cont4; | ||
410 | u32 disp_int_cont5; | ||
411 | u32 d1grph_int; | ||
412 | u32 d2grph_int; | ||
413 | u32 d3grph_int; | ||
414 | u32 d4grph_int; | ||
415 | u32 d5grph_int; | ||
416 | u32 d6grph_int; | ||
417 | }; | ||
418 | |||
419 | union radeon_irq_stat_regs { | ||
420 | struct r500_irq_stat_regs r500; | ||
421 | struct r600_irq_stat_regs r600; | ||
422 | struct evergreen_irq_stat_regs evergreen; | ||
423 | }; | ||
424 | |||
380 | struct radeon_irq { | 425 | struct radeon_irq { |
381 | bool installed; | 426 | bool installed; |
382 | bool sw_int; | 427 | bool sw_int; |
383 | /* FIXME: use a define for max crtc rather than hardcoding it */ | 428 | /* FIXME: use a define for max crtc rather than hardcoding it */ |
384 | bool crtc_vblank_int[6]; | 429 | bool crtc_vblank_int[6]; |
430 | bool pflip[6]; | ||
385 | wait_queue_head_t vblank_queue; | 431 | wait_queue_head_t vblank_queue; |
386 | /* FIXME: use defines for max hpd/dacs */ | 432 | /* FIXME: use defines for max hpd/dacs */ |
387 | bool hpd[6]; | 433 | bool hpd[6]; |
@@ -392,12 +438,17 @@ struct radeon_irq { | |||
392 | bool hdmi[2]; | 438 | bool hdmi[2]; |
393 | spinlock_t sw_lock; | 439 | spinlock_t sw_lock; |
394 | int sw_refcount; | 440 | int sw_refcount; |
441 | union radeon_irq_stat_regs stat_regs; | ||
442 | spinlock_t pflip_lock[6]; | ||
443 | int pflip_refcount[6]; | ||
395 | }; | 444 | }; |
396 | 445 | ||
397 | int radeon_irq_kms_init(struct radeon_device *rdev); | 446 | int radeon_irq_kms_init(struct radeon_device *rdev); |
398 | void radeon_irq_kms_fini(struct radeon_device *rdev); | 447 | void radeon_irq_kms_fini(struct radeon_device *rdev); |
399 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); | 448 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); |
400 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); | 449 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); |
450 | void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); | ||
451 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); | ||
401 | 452 | ||
402 | /* | 453 | /* |
403 | * CP & ring. | 454 | * CP & ring. |
@@ -687,6 +738,7 @@ enum radeon_int_thermal_type { | |||
687 | THERMAL_TYPE_RV6XX, | 738 | THERMAL_TYPE_RV6XX, |
688 | THERMAL_TYPE_RV770, | 739 | THERMAL_TYPE_RV770, |
689 | THERMAL_TYPE_EVERGREEN, | 740 | THERMAL_TYPE_EVERGREEN, |
741 | THERMAL_TYPE_SUMO, | ||
690 | }; | 742 | }; |
691 | 743 | ||
692 | struct radeon_voltage { | 744 | struct radeon_voltage { |
@@ -881,6 +933,10 @@ struct radeon_asic { | |||
881 | void (*pm_finish)(struct radeon_device *rdev); | 933 | void (*pm_finish)(struct radeon_device *rdev); |
882 | void (*pm_init_profile)(struct radeon_device *rdev); | 934 | void (*pm_init_profile)(struct radeon_device *rdev); |
883 | void (*pm_get_dynpm_state)(struct radeon_device *rdev); | 935 | void (*pm_get_dynpm_state)(struct radeon_device *rdev); |
936 | /* pageflipping */ | ||
937 | void (*pre_page_flip)(struct radeon_device *rdev, int crtc); | ||
938 | u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
939 | void (*post_page_flip)(struct radeon_device *rdev, int crtc); | ||
884 | }; | 940 | }; |
885 | 941 | ||
886 | /* | 942 | /* |
@@ -1269,6 +1325,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
1269 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) | 1325 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) |
1270 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) | 1326 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) |
1271 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) | 1327 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) |
1328 | #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM)) | ||
1272 | 1329 | ||
1273 | /* | 1330 | /* |
1274 | * BIOS helpers. | 1331 | * BIOS helpers. |
@@ -1344,6 +1401,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
1344 | #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) | 1401 | #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) |
1345 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) | 1402 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) |
1346 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) | 1403 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) |
1404 | #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) | ||
1405 | #define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) | ||
1406 | #define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) | ||
1347 | 1407 | ||
1348 | /* Common functions */ | 1408 | /* Common functions */ |
1349 | /* AGP */ | 1409 | /* AGP */ |
@@ -1432,7 +1492,6 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
1432 | struct drm_display_mode *mode2); | 1492 | struct drm_display_mode *mode2); |
1433 | 1493 | ||
1434 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ | 1494 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ |
1435 | extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | ||
1436 | extern bool r600_card_posted(struct radeon_device *rdev); | 1495 | extern bool r600_card_posted(struct radeon_device *rdev); |
1437 | extern void r600_cp_stop(struct radeon_device *rdev); | 1496 | extern void r600_cp_stop(struct radeon_device *rdev); |
1438 | extern int r600_cp_start(struct radeon_device *rdev); | 1497 | extern int r600_cp_start(struct radeon_device *rdev); |
@@ -1478,6 +1537,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo | |||
1478 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | 1537 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); |
1479 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); | 1538 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); |
1480 | 1539 | ||
1540 | extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | ||
1481 | extern void r700_cp_stop(struct radeon_device *rdev); | 1541 | extern void r700_cp_stop(struct radeon_device *rdev); |
1482 | extern void r700_cp_fini(struct radeon_device *rdev); | 1542 | extern void r700_cp_fini(struct radeon_device *rdev); |
1483 | extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); | 1543 | extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); |
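Editor's note: the pflip_lock[6]/pflip_refcount[6] pair implies a refcounted per-CRTC enable/disable of the pageflip interrupt, mirroring the existing sw_lock/sw_refcount scheme. A sketch of the get side under that assumption; the real body belongs in radeon_irq_kms.c and may differ in detail:

	/* Sketch only: the first reference turns the pflip interrupt on. */
	void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
	{
		unsigned long irqflags;

		if (crtc < 0 || crtc >= rdev->num_crtc)
			return;

		spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
		if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
			rdev->irq.pflip[crtc] = true;
			radeon_irq_set(rdev);	/* re-program via the asic hook */
		}
		spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
	}

The put side would mirror this, disabling the interrupt when the count drops back to zero.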
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 64fb89ecbf74..3d73fe484f42 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = { | |||
171 | .pm_finish = &r100_pm_finish, | 171 | .pm_finish = &r100_pm_finish, |
172 | .pm_init_profile = &r100_pm_init_profile, | 172 | .pm_init_profile = &r100_pm_init_profile, |
173 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 173 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
174 | .pre_page_flip = &r100_pre_page_flip, | ||
175 | .page_flip = &r100_page_flip, | ||
176 | .post_page_flip = &r100_post_page_flip, | ||
174 | }; | 177 | }; |
175 | 178 | ||
176 | static struct radeon_asic r200_asic = { | 179 | static struct radeon_asic r200_asic = { |
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = { | |||
215 | .pm_finish = &r100_pm_finish, | 218 | .pm_finish = &r100_pm_finish, |
216 | .pm_init_profile = &r100_pm_init_profile, | 219 | .pm_init_profile = &r100_pm_init_profile, |
217 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 220 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
221 | .pre_page_flip = &r100_pre_page_flip, | ||
222 | .page_flip = &r100_page_flip, | ||
223 | .post_page_flip = &r100_post_page_flip, | ||
218 | }; | 224 | }; |
219 | 225 | ||
220 | static struct radeon_asic r300_asic = { | 226 | static struct radeon_asic r300_asic = { |
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = { | |||
260 | .pm_finish = &r100_pm_finish, | 266 | .pm_finish = &r100_pm_finish, |
261 | .pm_init_profile = &r100_pm_init_profile, | 267 | .pm_init_profile = &r100_pm_init_profile, |
262 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 268 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
269 | .pre_page_flip = &r100_pre_page_flip, | ||
270 | .page_flip = &r100_page_flip, | ||
271 | .post_page_flip = &r100_post_page_flip, | ||
263 | }; | 272 | }; |
264 | 273 | ||
265 | static struct radeon_asic r300_asic_pcie = { | 274 | static struct radeon_asic r300_asic_pcie = { |
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = { | |||
304 | .pm_finish = &r100_pm_finish, | 313 | .pm_finish = &r100_pm_finish, |
305 | .pm_init_profile = &r100_pm_init_profile, | 314 | .pm_init_profile = &r100_pm_init_profile, |
306 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 315 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
316 | .pre_page_flip = &r100_pre_page_flip, | ||
317 | .page_flip = &r100_page_flip, | ||
318 | .post_page_flip = &r100_post_page_flip, | ||
307 | }; | 319 | }; |
308 | 320 | ||
309 | static struct radeon_asic r420_asic = { | 321 | static struct radeon_asic r420_asic = { |
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = { | |||
349 | .pm_finish = &r100_pm_finish, | 361 | .pm_finish = &r100_pm_finish, |
350 | .pm_init_profile = &r420_pm_init_profile, | 362 | .pm_init_profile = &r420_pm_init_profile, |
351 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 363 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
364 | .pre_page_flip = &r100_pre_page_flip, | ||
365 | .page_flip = &r100_page_flip, | ||
366 | .post_page_flip = &r100_post_page_flip, | ||
352 | }; | 367 | }; |
353 | 368 | ||
354 | static struct radeon_asic rs400_asic = { | 369 | static struct radeon_asic rs400_asic = { |
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = { | |||
394 | .pm_finish = &r100_pm_finish, | 409 | .pm_finish = &r100_pm_finish, |
395 | .pm_init_profile = &r100_pm_init_profile, | 410 | .pm_init_profile = &r100_pm_init_profile, |
396 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 411 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
412 | .pre_page_flip = &r100_pre_page_flip, | ||
413 | .page_flip = &r100_page_flip, | ||
414 | .post_page_flip = &r100_post_page_flip, | ||
397 | }; | 415 | }; |
398 | 416 | ||
399 | static struct radeon_asic rs600_asic = { | 417 | static struct radeon_asic rs600_asic = { |
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = { | |||
439 | .pm_finish = &rs600_pm_finish, | 457 | .pm_finish = &rs600_pm_finish, |
440 | .pm_init_profile = &r420_pm_init_profile, | 458 | .pm_init_profile = &r420_pm_init_profile, |
441 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 459 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
460 | .pre_page_flip = &rs600_pre_page_flip, | ||
461 | .page_flip = &rs600_page_flip, | ||
462 | .post_page_flip = &rs600_post_page_flip, | ||
442 | }; | 463 | }; |
443 | 464 | ||
444 | static struct radeon_asic rs690_asic = { | 465 | static struct radeon_asic rs690_asic = { |
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = { | |||
484 | .pm_finish = &rs600_pm_finish, | 505 | .pm_finish = &rs600_pm_finish, |
485 | .pm_init_profile = &r420_pm_init_profile, | 506 | .pm_init_profile = &r420_pm_init_profile, |
486 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 507 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
508 | .pre_page_flip = &rs600_pre_page_flip, | ||
509 | .page_flip = &rs600_page_flip, | ||
510 | .post_page_flip = &rs600_post_page_flip, | ||
487 | }; | 511 | }; |
488 | 512 | ||
489 | static struct radeon_asic rv515_asic = { | 513 | static struct radeon_asic rv515_asic = { |
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = { | |||
529 | .pm_finish = &rs600_pm_finish, | 553 | .pm_finish = &rs600_pm_finish, |
530 | .pm_init_profile = &r420_pm_init_profile, | 554 | .pm_init_profile = &r420_pm_init_profile, |
531 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 555 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
556 | .pre_page_flip = &rs600_pre_page_flip, | ||
557 | .page_flip = &rs600_page_flip, | ||
558 | .post_page_flip = &rs600_post_page_flip, | ||
532 | }; | 559 | }; |
533 | 560 | ||
534 | static struct radeon_asic r520_asic = { | 561 | static struct radeon_asic r520_asic = { |
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = { | |||
574 | .pm_finish = &rs600_pm_finish, | 601 | .pm_finish = &rs600_pm_finish, |
575 | .pm_init_profile = &r420_pm_init_profile, | 602 | .pm_init_profile = &r420_pm_init_profile, |
576 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 603 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
604 | .pre_page_flip = &rs600_pre_page_flip, | ||
605 | .page_flip = &rs600_page_flip, | ||
606 | .post_page_flip = &rs600_post_page_flip, | ||
577 | }; | 607 | }; |
578 | 608 | ||
579 | static struct radeon_asic r600_asic = { | 609 | static struct radeon_asic r600_asic = { |
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = { | |||
618 | .pm_finish = &rs600_pm_finish, | 648 | .pm_finish = &rs600_pm_finish, |
619 | .pm_init_profile = &r600_pm_init_profile, | 649 | .pm_init_profile = &r600_pm_init_profile, |
620 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 650 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
651 | .pre_page_flip = &rs600_pre_page_flip, | ||
652 | .page_flip = &rs600_page_flip, | ||
653 | .post_page_flip = &rs600_post_page_flip, | ||
621 | }; | 654 | }; |
622 | 655 | ||
623 | static struct radeon_asic rs780_asic = { | 656 | static struct radeon_asic rs780_asic = { |
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = { | |||
662 | .pm_finish = &rs600_pm_finish, | 695 | .pm_finish = &rs600_pm_finish, |
663 | .pm_init_profile = &rs780_pm_init_profile, | 696 | .pm_init_profile = &rs780_pm_init_profile, |
664 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 697 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
698 | .pre_page_flip = &rs600_pre_page_flip, | ||
699 | .page_flip = &rs600_page_flip, | ||
700 | .post_page_flip = &rs600_post_page_flip, | ||
665 | }; | 701 | }; |
666 | 702 | ||
667 | static struct radeon_asic rv770_asic = { | 703 | static struct radeon_asic rv770_asic = { |
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = { | |||
706 | .pm_finish = &rs600_pm_finish, | 742 | .pm_finish = &rs600_pm_finish, |
707 | .pm_init_profile = &r600_pm_init_profile, | 743 | .pm_init_profile = &r600_pm_init_profile, |
708 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 744 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
745 | .pre_page_flip = &rs600_pre_page_flip, | ||
746 | .page_flip = &rv770_page_flip, | ||
747 | .post_page_flip = &rs600_post_page_flip, | ||
709 | }; | 748 | }; |
710 | 749 | ||
711 | static struct radeon_asic evergreen_asic = { | 750 | static struct radeon_asic evergreen_asic = { |
@@ -749,6 +788,52 @@ static struct radeon_asic evergreen_asic = { | |||
749 | .pm_finish = &evergreen_pm_finish, | 788 | .pm_finish = &evergreen_pm_finish, |
750 | .pm_init_profile = &r600_pm_init_profile, | 789 | .pm_init_profile = &r600_pm_init_profile, |
751 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 790 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
791 | .pre_page_flip = &evergreen_pre_page_flip, | ||
792 | .page_flip = &evergreen_page_flip, | ||
793 | .post_page_flip = &evergreen_post_page_flip, | ||
794 | }; | ||
795 | |||
796 | static struct radeon_asic sumo_asic = { | ||
797 | .init = &evergreen_init, | ||
798 | .fini = &evergreen_fini, | ||
799 | .suspend = &evergreen_suspend, | ||
800 | .resume = &evergreen_resume, | ||
801 | .cp_commit = &r600_cp_commit, | ||
802 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | ||
803 | .asic_reset = &evergreen_asic_reset, | ||
804 | .vga_set_state = &r600_vga_set_state, | ||
805 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
806 | .gart_set_page = &rs600_gart_set_page, | ||
807 | .ring_test = &r600_ring_test, | ||
808 | .ring_ib_execute = &r600_ring_ib_execute, | ||
809 | .irq_set = &evergreen_irq_set, | ||
810 | .irq_process = &evergreen_irq_process, | ||
811 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
812 | .fence_ring_emit = &r600_fence_ring_emit, | ||
813 | .cs_parse = &evergreen_cs_parse, | ||
814 | .copy_blit = &evergreen_copy_blit, | ||
815 | .copy_dma = &evergreen_copy_blit, | ||
816 | .copy = &evergreen_copy_blit, | ||
817 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
818 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
819 | .get_memory_clock = NULL, | ||
820 | .set_memory_clock = NULL, | ||
821 | .get_pcie_lanes = NULL, | ||
822 | .set_pcie_lanes = NULL, | ||
823 | .set_clock_gating = NULL, | ||
824 | .set_surface_reg = &r600_set_surface_reg, | ||
825 | .clear_surface_reg = &r600_clear_surface_reg, | ||
826 | .bandwidth_update = &evergreen_bandwidth_update, | ||
827 | .hpd_init = &evergreen_hpd_init, | ||
828 | .hpd_fini = &evergreen_hpd_fini, | ||
829 | .hpd_sense = &evergreen_hpd_sense, | ||
830 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
831 | .gui_idle = &r600_gui_idle, | ||
832 | .pm_misc = &evergreen_pm_misc, | ||
833 | .pm_prepare = &evergreen_pm_prepare, | ||
834 | .pm_finish = &evergreen_pm_finish, | ||
835 | .pm_init_profile = &rs780_pm_init_profile, | ||
836 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
752 | }; | 837 | }; |
753 | 838 | ||
754 | int radeon_asic_init(struct radeon_device *rdev) | 839 | int radeon_asic_init(struct radeon_device *rdev) |
@@ -835,6 +920,9 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
835 | case CHIP_HEMLOCK: | 920 | case CHIP_HEMLOCK: |
836 | rdev->asic = &evergreen_asic; | 921 | rdev->asic = &evergreen_asic; |
837 | break; | 922 | break; |
923 | case CHIP_PALM: | ||
924 | rdev->asic = &sumo_asic; | ||
925 | break; | ||
838 | default: | 926 | default: |
839 | /* FIXME: not supported yet */ | 927 | /* FIXME: not supported yet */ |
840 | return -EINVAL; | 928 | return -EINVAL; |
@@ -849,7 +937,9 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
849 | if (rdev->flags & RADEON_SINGLE_CRTC) | 937 | if (rdev->flags & RADEON_SINGLE_CRTC) |
850 | rdev->num_crtc = 1; | 938 | rdev->num_crtc = 1; |
851 | else { | 939 | else { |
852 | if (ASIC_IS_DCE4(rdev)) | 940 | if (ASIC_IS_DCE41(rdev)) |
941 | rdev->num_crtc = 2; | ||
942 | else if (ASIC_IS_DCE4(rdev)) | ||
853 | rdev->num_crtc = 6; | 943 | rdev->num_crtc = 6; |
854 | else | 944 | else |
855 | rdev->num_crtc = 2; | 945 | rdev->num_crtc = 2; |
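Editor's note: with every family now providing pre_page_flip/page_flip/post_page_flip, generic code can drive a flip through the dispatch macros added in the radeon.h hunk. The sketch below compresses what is really an asynchronous flow (pre arms the interrupt when the flip is queued; post disarms it from the completion handler, presumably radeon_crtc_handle_flip()); the function name is hypothetical:

	/* Sketch only: shows the dispatch order, not the real
	 * queue/complete split. */
	static u32 radeon_flip_dispatch_sketch(struct radeon_device *rdev,
					       int crtc, u64 new_crtc_base)
	{
		u32 stamp;

		radeon_pre_page_flip(rdev, crtc);	/* arm pflip interrupt */
		stamp = radeon_page_flip(rdev, crtc, new_crtc_base); /* program new base */
		radeon_post_page_flip(rdev, crtc);	/* disarm after completion */
		return stamp;
	}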
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 740988244143..4970eda1bd41 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -130,6 +130,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev); | |||
130 | extern void r100_pm_finish(struct radeon_device *rdev); | 130 | extern void r100_pm_finish(struct radeon_device *rdev); |
131 | extern void r100_pm_init_profile(struct radeon_device *rdev); | 131 | extern void r100_pm_init_profile(struct radeon_device *rdev); |
132 | extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); | 132 | extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); |
133 | extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); | ||
134 | extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
135 | extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | ||
133 | 136 | ||
134 | /* | 137 | /* |
135 | * r200,rv250,rs300,rv280 | 138 | * r200,rv250,rs300,rv280 |
@@ -205,6 +208,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev, | |||
205 | extern void rs600_pm_misc(struct radeon_device *rdev); | 208 | extern void rs600_pm_misc(struct radeon_device *rdev); |
206 | extern void rs600_pm_prepare(struct radeon_device *rdev); | 209 | extern void rs600_pm_prepare(struct radeon_device *rdev); |
207 | extern void rs600_pm_finish(struct radeon_device *rdev); | 210 | extern void rs600_pm_finish(struct radeon_device *rdev); |
211 | extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); | ||
212 | extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
213 | extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); | ||
208 | 214 | ||
209 | /* | 215 | /* |
210 | * rs690,rs740 | 216 | * rs690,rs740 |
@@ -287,6 +293,7 @@ void rv770_fini(struct radeon_device *rdev); | |||
287 | int rv770_suspend(struct radeon_device *rdev); | 293 | int rv770_suspend(struct radeon_device *rdev); |
288 | int rv770_resume(struct radeon_device *rdev); | 294 | int rv770_resume(struct radeon_device *rdev); |
289 | extern void rv770_pm_misc(struct radeon_device *rdev); | 295 | extern void rv770_pm_misc(struct radeon_device *rdev); |
296 | extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
290 | 297 | ||
291 | /* | 298 | /* |
292 | * evergreen | 299 | * evergreen |
@@ -314,5 +321,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); | |||
314 | extern void evergreen_pm_misc(struct radeon_device *rdev); | 321 | extern void evergreen_pm_misc(struct radeon_device *rdev); |
315 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | 322 | extern void evergreen_pm_prepare(struct radeon_device *rdev); |
316 | extern void evergreen_pm_finish(struct radeon_device *rdev); | 323 | extern void evergreen_pm_finish(struct radeon_device *rdev); |
324 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); | ||
325 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
326 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | ||
317 | 327 | ||
318 | #endif | 328 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index bc5a2c3382d9..8e82f672263f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -1337,6 +1337,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, | |||
1337 | return false; | 1337 | return false; |
1338 | } | 1338 | } |
1339 | 1339 | ||
1340 | static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, | ||
1341 | struct radeon_atom_ss *ss, | ||
1342 | int id) | ||
1343 | { | ||
1344 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
1345 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
1346 | u16 data_offset, size; | ||
1347 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; | ||
1348 | u8 frev, crev; | ||
1349 | u16 percentage = 0, rate = 0; | ||
1350 | |||
1351 | /* get any igp specific overrides */ | ||
1352 | if (atom_parse_data_header(mode_info->atom_context, index, &size, | ||
1353 | &frev, &crev, &data_offset)) { | ||
1354 | igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) | ||
1355 | (mode_info->atom_context->bios + data_offset); | ||
1356 | switch (id) { | ||
1357 | case ASIC_INTERNAL_SS_ON_TMDS: | ||
1358 | percentage = le16_to_cpu(igp_info->usDVISSPercentage); | ||
1359 | rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); | ||
1360 | break; | ||
1361 | case ASIC_INTERNAL_SS_ON_HDMI: | ||
1362 | percentage = le16_to_cpu(igp_info->usHDMISSPercentage); | ||
1363 | rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); | ||
1364 | break; | ||
1365 | case ASIC_INTERNAL_SS_ON_LVDS: | ||
1366 | percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); | ||
1367 | rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); | ||
1368 | break; | ||
1369 | } | ||
1370 | if (percentage) | ||
1371 | ss->percentage = percentage; | ||
1372 | if (rate) | ||
1373 | ss->rate = rate; | ||
1374 | } | ||
1375 | } | ||
1376 | |||
1340 | union asic_ss_info { | 1377 | union asic_ss_info { |
1341 | struct _ATOM_ASIC_INTERNAL_SS_INFO info; | 1378 | struct _ATOM_ASIC_INTERNAL_SS_INFO info; |
1342 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; | 1379 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; |
@@ -1401,6 +1438,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1401 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1438 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1402 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1439 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
1403 | ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); | 1440 | ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); |
1441 | if (rdev->flags & RADEON_IS_IGP) | ||
1442 | radeon_atombios_get_igp_ss_overrides(rdev, ss, id); | ||
1404 | return true; | 1443 | return true; |
1405 | } | 1444 | } |
1406 | } | 1445 | } |
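Editor's note: the precedence just added is "generic ASIC SS table first; the IGP table wins only where it supplies a non-zero value". A hedged illustration, assuming the ASIC_INTERNAL_SS_ON_HDMI id from atombios.h; the wrapper name is hypothetical:

	/* Sketch only: ss already holds the values from the generic
	 * ASIC_InternalSS_Info table when this runs. */
	static void igp_ss_override_sketch(struct radeon_device *rdev,
					   struct radeon_atom_ss *ss)
	{
		if (rdev->flags & RADEON_IS_IGP)
			/* replaces ss->percentage / ss->rate only when the
			 * IntegratedSystemInfo v6 table gives non-zero values */
			radeon_atombios_get_igp_ss_overrides(rdev, ss,
							     ASIC_INTERNAL_SS_ON_HDMI);
	}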
@@ -1740,495 +1779,600 @@ static const char *pp_lib_thermal_controller_names[] = { | |||
1740 | "RV6xx", | 1779 | "RV6xx", |
1741 | "RV770", | 1780 | "RV770", |
1742 | "adt7473", | 1781 | "adt7473", |
1782 | "NONE", | ||
1743 | "External GPIO", | 1783 | "External GPIO", |
1744 | "Evergreen", | 1784 | "Evergreen", |
1745 | "adt7473 with internal", | 1785 | "emc2103", |
1746 | 1786 | "Sumo", | |
1747 | }; | 1787 | }; |
1748 | 1788 | ||
1749 | union power_info { | 1789 | union power_info { |
1750 | struct _ATOM_POWERPLAY_INFO info; | 1790 | struct _ATOM_POWERPLAY_INFO info; |
1751 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | 1791 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
1752 | struct _ATOM_POWERPLAY_INFO_V3 info_3; | 1792 | struct _ATOM_POWERPLAY_INFO_V3 info_3; |
1753 | struct _ATOM_PPLIB_POWERPLAYTABLE info_4; | 1793 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; |
1794 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; | ||
1795 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; | ||
1754 | }; | 1796 | }; |
1755 | 1797 | ||
1756 | void radeon_atombios_get_power_modes(struct radeon_device *rdev) | 1798 | union pplib_clock_info { |
1799 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; | ||
1800 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; | ||
1801 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; | ||
1802 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; | ||
1803 | }; | ||
1804 | |||
1805 | union pplib_power_state { | ||
1806 | struct _ATOM_PPLIB_STATE v1; | ||
1807 | struct _ATOM_PPLIB_STATE_V2 v2; | ||
1808 | }; | ||
1809 | |||
1810 | static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev, | ||
1811 | int state_index, | ||
1812 | u32 misc, u32 misc2) | ||
1813 | { | ||
1814 | rdev->pm.power_state[state_index].misc = misc; | ||
1815 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
1816 | /* order matters! */ | ||
1817 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1818 | rdev->pm.power_state[state_index].type = | ||
1819 | POWER_STATE_TYPE_POWERSAVE; | ||
1820 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1821 | rdev->pm.power_state[state_index].type = | ||
1822 | POWER_STATE_TYPE_BATTERY; | ||
1823 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1824 | rdev->pm.power_state[state_index].type = | ||
1825 | POWER_STATE_TYPE_BATTERY; | ||
1826 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1827 | rdev->pm.power_state[state_index].type = | ||
1828 | POWER_STATE_TYPE_BALANCED; | ||
1829 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1830 | rdev->pm.power_state[state_index].type = | ||
1831 | POWER_STATE_TYPE_PERFORMANCE; | ||
1832 | rdev->pm.power_state[state_index].flags &= | ||
1833 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1834 | } | ||
1835 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | ||
1836 | rdev->pm.power_state[state_index].type = | ||
1837 | POWER_STATE_TYPE_BALANCED; | ||
1838 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1839 | rdev->pm.power_state[state_index].type = | ||
1840 | POWER_STATE_TYPE_DEFAULT; | ||
1841 | rdev->pm.default_power_state_index = state_index; | ||
1842 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1843 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1844 | } else if (state_index == 0) { | ||
1845 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1846 | RADEON_PM_MODE_NO_DISPLAY; | ||
1847 | } | ||
1848 | } | ||
1849 | |||
1850 | static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | ||
1757 | { | 1851 | { |
1758 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 1852 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
1853 | u32 misc, misc2 = 0; | ||
1854 | int num_modes = 0, i; | ||
1855 | int state_index = 0; | ||
1856 | struct radeon_i2c_bus_rec i2c_bus; | ||
1857 | union power_info *power_info; | ||
1759 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 1858 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
1760 | u16 data_offset; | 1859 | u16 data_offset; |
1761 | u8 frev, crev; | 1860 | u8 frev, crev; |
1762 | u32 misc, misc2 = 0, sclk, mclk; | ||
1763 | union power_info *power_info; | ||
1764 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
1765 | struct _ATOM_PPLIB_STATE *power_state; | ||
1766 | int num_modes = 0, i, j; | ||
1767 | int state_index = 0, mode_index = 0; | ||
1768 | struct radeon_i2c_bus_rec i2c_bus; | ||
1769 | |||
1770 | rdev->pm.default_power_state_index = -1; | ||
1771 | 1861 | ||
1772 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1862 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
1773 | &frev, &crev, &data_offset)) { | 1863 | &frev, &crev, &data_offset)) |
1774 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | 1864 | return state_index; |
1775 | if (frev < 4) { | 1865 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
1776 | /* add the i2c bus for thermal/fan chip */ | 1866 | |
1777 | if (power_info->info.ucOverdriveThermalController > 0) { | 1867 | /* add the i2c bus for thermal/fan chip */ |
1778 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | 1868 | if (power_info->info.ucOverdriveThermalController > 0) { |
1779 | thermal_controller_names[power_info->info.ucOverdriveThermalController], | 1869 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", |
1780 | power_info->info.ucOverdriveControllerAddress >> 1); | 1870 | thermal_controller_names[power_info->info.ucOverdriveThermalController], |
1781 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); | 1871 | power_info->info.ucOverdriveControllerAddress >> 1); |
1782 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | 1872 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); |
1783 | if (rdev->pm.i2c_bus) { | 1873 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); |
1784 | struct i2c_board_info info = { }; | 1874 | if (rdev->pm.i2c_bus) { |
1785 | const char *name = thermal_controller_names[power_info->info. | 1875 | struct i2c_board_info info = { }; |
1786 | ucOverdriveThermalController]; | 1876 | const char *name = thermal_controller_names[power_info->info. |
1787 | info.addr = power_info->info.ucOverdriveControllerAddress >> 1; | 1877 | ucOverdriveThermalController]; |
1788 | strlcpy(info.type, name, sizeof(info.type)); | 1878 | info.addr = power_info->info.ucOverdriveControllerAddress >> 1; |
1789 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | 1879 | strlcpy(info.type, name, sizeof(info.type)); |
1790 | } | 1880 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); |
1881 | } | ||
1882 | } | ||
1883 | num_modes = power_info->info.ucNumOfPowerModeEntries; | ||
1884 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | ||
1885 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | ||
1886 | /* last mode is usually default, array is low to high */ | ||
1887 | for (i = 0; i < num_modes; i++) { | ||
1888 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | ||
1889 | switch (frev) { | ||
1890 | case 1: | ||
1891 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
1892 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
1893 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); | ||
1894 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
1895 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); | ||
1896 | /* skip invalid modes */ | ||
1897 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
1898 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
1899 | continue; | ||
1900 | rdev->pm.power_state[state_index].pcie_lanes = | ||
1901 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | ||
1902 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | ||
1903 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
1904 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
1905 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1906 | VOLTAGE_GPIO; | ||
1907 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
1908 | radeon_lookup_gpio(rdev, | ||
1909 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
1910 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
1911 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1912 | true; | ||
1913 | else | ||
1914 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1915 | false; | ||
1916 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
1917 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1918 | VOLTAGE_VDDC; | ||
1919 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1920 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1791 | } | 1921 | } |
1792 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 1922 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; |
1793 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 1923 | radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0); |
1794 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 1924 | state_index++; |
1795 | /* last mode is usually default, array is low to high */ | 1925 | break; |
1796 | for (i = 0; i < num_modes; i++) { | 1926 | case 2: |
1797 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 1927 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
1798 | switch (frev) { | 1928 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
1799 | case 1: | 1929 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); |
1800 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 1930 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
1801 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 1931 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); |
1802 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); | 1932 | /* skip invalid modes */ |
1803 | rdev->pm.power_state[state_index].clock_info[0].sclk = | 1933 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || |
1804 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); | 1934 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) |
1805 | /* skip invalid modes */ | 1935 | continue; |
1806 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | 1936 | rdev->pm.power_state[state_index].pcie_lanes = |
1807 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | 1937 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; |
1808 | continue; | 1938 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); |
1809 | rdev->pm.power_state[state_index].pcie_lanes = | 1939 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); |
1810 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | 1940 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
1811 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | 1941 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { |
1812 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | 1942 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
1813 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | 1943 | VOLTAGE_GPIO; |
1814 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1944 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
1815 | VOLTAGE_GPIO; | 1945 | radeon_lookup_gpio(rdev, |
1816 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1946 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); |
1817 | radeon_lookup_gpio(rdev, | 1947 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) |
1818 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); | 1948 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
1819 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | 1949 | true; |
1820 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | 1950 | else |
1821 | true; | 1951 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
1822 | else | 1952 | false; |
1823 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | 1953 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { |
1824 | false; | 1954 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
1825 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | 1955 | VOLTAGE_VDDC; |
1826 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1956 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = |
1827 | VOLTAGE_VDDC; | 1957 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; |
1828 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1829 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1830 | } | ||
1831 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1832 | rdev->pm.power_state[state_index].misc = misc; | ||
1833 | /* order matters! */ | ||
1834 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1835 | rdev->pm.power_state[state_index].type = | ||
1836 | POWER_STATE_TYPE_POWERSAVE; | ||
1837 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1838 | rdev->pm.power_state[state_index].type = | ||
1839 | POWER_STATE_TYPE_BATTERY; | ||
1840 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1841 | rdev->pm.power_state[state_index].type = | ||
1842 | POWER_STATE_TYPE_BATTERY; | ||
1843 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1844 | rdev->pm.power_state[state_index].type = | ||
1845 | POWER_STATE_TYPE_BALANCED; | ||
1846 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1847 | rdev->pm.power_state[state_index].type = | ||
1848 | POWER_STATE_TYPE_PERFORMANCE; | ||
1849 | rdev->pm.power_state[state_index].flags &= | ||
1850 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1851 | } | ||
1852 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1853 | rdev->pm.power_state[state_index].type = | ||
1854 | POWER_STATE_TYPE_DEFAULT; | ||
1855 | rdev->pm.default_power_state_index = state_index; | ||
1856 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1857 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1858 | rdev->pm.power_state[state_index].flags &= | ||
1859 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1860 | } else if (state_index == 0) { | ||
1861 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1862 | RADEON_PM_MODE_NO_DISPLAY; | ||
1863 | } | ||
1864 | state_index++; | ||
1865 | break; | ||
1866 | case 2: | ||
1867 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
1868 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
1869 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); | ||
1870 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
1871 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); | ||
1872 | /* skip invalid modes */ | ||
1873 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
1874 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
1875 | continue; | ||
1876 | rdev->pm.power_state[state_index].pcie_lanes = | ||
1877 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; | ||
1878 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); | ||
1879 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); | ||
1880 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
1881 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
1882 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1883 | VOLTAGE_GPIO; | ||
1884 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
1885 | radeon_lookup_gpio(rdev, | ||
1886 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
1887 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
1888 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1889 | true; | ||
1890 | else | ||
1891 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1892 | false; | ||
1893 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
1894 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1895 | VOLTAGE_VDDC; | ||
1896 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1897 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1898 | } | ||
1899 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1900 | rdev->pm.power_state[state_index].misc = misc; | ||
1901 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
1902 | /* order matters! */ | ||
1903 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1904 | rdev->pm.power_state[state_index].type = | ||
1905 | POWER_STATE_TYPE_POWERSAVE; | ||
1906 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1907 | rdev->pm.power_state[state_index].type = | ||
1908 | POWER_STATE_TYPE_BATTERY; | ||
1909 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1910 | rdev->pm.power_state[state_index].type = | ||
1911 | POWER_STATE_TYPE_BATTERY; | ||
1912 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1913 | rdev->pm.power_state[state_index].type = | ||
1914 | POWER_STATE_TYPE_BALANCED; | ||
1915 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1916 | rdev->pm.power_state[state_index].type = | ||
1917 | POWER_STATE_TYPE_PERFORMANCE; | ||
1918 | rdev->pm.power_state[state_index].flags &= | ||
1919 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1920 | } | ||
1921 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | ||
1922 | rdev->pm.power_state[state_index].type = | ||
1923 | POWER_STATE_TYPE_BALANCED; | ||
1924 | if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) | ||
1925 | rdev->pm.power_state[state_index].flags &= | ||
1926 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1927 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1928 | rdev->pm.power_state[state_index].type = | ||
1929 | POWER_STATE_TYPE_DEFAULT; | ||
1930 | rdev->pm.default_power_state_index = state_index; | ||
1931 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1932 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1933 | rdev->pm.power_state[state_index].flags &= | ||
1934 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1935 | } else if (state_index == 0) { | ||
1936 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1937 | RADEON_PM_MODE_NO_DISPLAY; | ||
1938 | } | ||
1939 | state_index++; | ||
1940 | break; | ||
1941 | case 3: | ||
1942 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
1943 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
1944 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); | ||
1945 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
1946 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); | ||
1947 | /* skip invalid modes */ | ||
1948 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
1949 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
1950 | continue; | ||
1951 | rdev->pm.power_state[state_index].pcie_lanes = | ||
1952 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | ||
1953 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | ||
1954 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | ||
1955 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
1956 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
1957 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1958 | VOLTAGE_GPIO; | ||
1959 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
1960 | radeon_lookup_gpio(rdev, | ||
1961 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
1962 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
1963 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1964 | true; | ||
1965 | else | ||
1966 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1967 | false; | ||
1968 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
1969 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1970 | VOLTAGE_VDDC; | ||
1971 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1972 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1973 | if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { | ||
1974 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = | ||
1975 | true; | ||
1976 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = | ||
1977 | power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; | ||
1978 | } | ||
1979 | } | ||
1980 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1981 | rdev->pm.power_state[state_index].misc = misc; | ||
1982 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
1983 | /* order matters! */ | ||
1984 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1985 | rdev->pm.power_state[state_index].type = | ||
1986 | POWER_STATE_TYPE_POWERSAVE; | ||
1987 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1988 | rdev->pm.power_state[state_index].type = | ||
1989 | POWER_STATE_TYPE_BATTERY; | ||
1990 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1991 | rdev->pm.power_state[state_index].type = | ||
1992 | POWER_STATE_TYPE_BATTERY; | ||
1993 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1994 | rdev->pm.power_state[state_index].type = | ||
1995 | POWER_STATE_TYPE_BALANCED; | ||
1996 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1997 | rdev->pm.power_state[state_index].type = | ||
1998 | POWER_STATE_TYPE_PERFORMANCE; | ||
1999 | rdev->pm.power_state[state_index].flags &= | ||
2000 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
2001 | } | ||
2002 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | ||
2003 | rdev->pm.power_state[state_index].type = | ||
2004 | POWER_STATE_TYPE_BALANCED; | ||
2005 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
2006 | rdev->pm.power_state[state_index].type = | ||
2007 | POWER_STATE_TYPE_DEFAULT; | ||
2008 | rdev->pm.default_power_state_index = state_index; | ||
2009 | rdev->pm.power_state[state_index].default_clock_mode = | ||
2010 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
2011 | } else if (state_index == 0) { | ||
2012 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
2013 | RADEON_PM_MODE_NO_DISPLAY; | ||
2014 | } | ||
2015 | state_index++; | ||
2016 | break; | ||
2017 | } | ||
2018 | } | 1958 | } |
2019 | /* last mode is usually default */ | 1959 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; |
2020 | if (rdev->pm.default_power_state_index == -1) { | 1960 | radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); |
2021 | rdev->pm.power_state[state_index - 1].type = | 1961 | state_index++; |
2022 | POWER_STATE_TYPE_DEFAULT; | 1962 | break; |
2023 | rdev->pm.default_power_state_index = state_index - 1; | 1963 | case 3: |
2024 | rdev->pm.power_state[state_index - 1].default_clock_mode = | 1964 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2025 | &rdev->pm.power_state[state_index - 1].clock_info[0]; | 1965 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2026 | rdev->pm.power_state[state_index].flags &= | 1966 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); |
2027 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | 1967 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
2028 | rdev->pm.power_state[state_index].misc = 0; | 1968 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); |
2029 | rdev->pm.power_state[state_index].misc2 = 0; | 1969 | /* skip invalid modes */ |
1970 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
1971 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
1972 | continue; | ||
1973 | rdev->pm.power_state[state_index].pcie_lanes = | ||
1974 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | ||
1975 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | ||
1976 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | ||
1977 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
1978 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
1979 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1980 | VOLTAGE_GPIO; | ||
1981 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
1982 | radeon_lookup_gpio(rdev, | ||
1983 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
1984 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
1985 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1986 | true; | ||
1987 | else | ||
1988 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1989 | false; | ||
1990 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
1991 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1992 | VOLTAGE_VDDC; | ||
1993 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1994 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1995 | if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { | ||
1996 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = | ||
1997 | true; | ||
1998 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = | ||
1999 | power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; | ||
2000 | } | ||
2030 | } | 2001 | } |
2002 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
2003 | radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); | ||
2004 | state_index++; | ||
2005 | break; | ||
2006 | } | ||
2007 | } | ||
2008 | /* last mode is usually default */ | ||
2009 | if (rdev->pm.default_power_state_index == -1) { | ||
2010 | rdev->pm.power_state[state_index - 1].type = | ||
2011 | POWER_STATE_TYPE_DEFAULT; | ||
2012 | rdev->pm.default_power_state_index = state_index - 1; | ||
2013 | rdev->pm.power_state[state_index - 1].default_clock_mode = | ||
2014 | &rdev->pm.power_state[state_index - 1].clock_info[0]; | ||
2015 | rdev->pm.power_state[state_index].flags &= | ||
2016 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
2017 | rdev->pm.power_state[state_index].misc = 0; | ||
2018 | rdev->pm.power_state[state_index].misc2 = 0; | ||
2019 | } | ||
2020 | return state_index; | ||
2021 | } | ||
2022 | |||
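The helper call on new line 2003 replaces the inline flag decoding deleted above (old lines 1984-2014). A plausible sketch of the factored-out radeon_atombios_parse_misc_flags_1_3(), reconstructed from those removed lines; the real helper may differ in detail (for instance by also folding in the state-0 NO_DISPLAY handling):

	static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
							 int state_index, u32 misc, u32 misc2)
	{
		rdev->pm.power_state[state_index].misc = misc;
		rdev->pm.power_state[state_index].misc2 = misc2;
		/* order matters: a later match overrides an earlier one */
		if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_POWERSAVE;
		if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY;
		if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY;
		if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED;
		if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE;
			rdev->pm.power_state[state_index].flags &=
				~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
		}
		if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED;
		if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
			rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT;
			rdev->pm.default_power_state_index = state_index;
			rdev->pm.power_state[state_index].default_clock_mode =
				&rdev->pm.power_state[state_index].clock_info[0];
		}
	}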
2023 | static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, | ||
2024 | ATOM_PPLIB_THERMALCONTROLLER *controller) | ||
2025 | { | ||
2026 | struct radeon_i2c_bus_rec i2c_bus; | ||
2027 | |||
2028 | /* add the i2c bus for thermal/fan chip */ | ||
2029 | if (controller->ucType > 0) { | ||
2030 | if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { | ||
2031 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2032 | (controller->ucFanParameters & | ||
2033 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2034 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; | ||
2035 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { | ||
2036 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2037 | (controller->ucFanParameters & | ||
2038 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2039 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; | ||
2040 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { | ||
2041 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2042 | (controller->ucFanParameters & | ||
2043 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2044 | rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; | ||
2045 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { | ||
2046 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2047 | (controller->ucFanParameters & | ||
2048 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2049 | rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; | ||
2050 | } else if ((controller->ucType == | ||
2051 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | ||
2052 | (controller->ucType == | ||
2053 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || | ||
2054 | (controller->ucType == | ||
2055 | ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { | ||
2056 | DRM_INFO("Special thermal controller config\n"); | ||
2031 | } else { | 2057 | } else { |
2032 | int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | 2058 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", |
2033 | uint8_t fw_frev, fw_crev; | 2059 | pp_lib_thermal_controller_names[controller->ucType], |
2034 | uint16_t fw_data_offset, vddc = 0; | 2060 | controller->ucI2cAddress >> 1, |
2035 | union firmware_info *firmware_info; | 2061 | (controller->ucFanParameters & |
2036 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; | 2062 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
2037 | 2063 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); | |
2038 | if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, | 2064 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); |
2039 | &fw_frev, &fw_crev, &fw_data_offset)) { | 2065 | if (rdev->pm.i2c_bus) { |
2040 | firmware_info = | 2066 | struct i2c_board_info info = { }; |
2041 | (union firmware_info *)(mode_info->atom_context->bios + | 2067 | const char *name = pp_lib_thermal_controller_names[controller->ucType]; |
2042 | fw_data_offset); | 2068 | info.addr = controller->ucI2cAddress >> 1; |
2043 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; | 2069 | strlcpy(info.type, name, sizeof(info.type)); |
2070 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
2044 | } | 2071 | } |
2072 | } | ||
2073 | } | ||
2074 | } | ||
2045 | 2075 | ||
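A detail worth noting in the external-controller branch above: ATOM stores the chip's I2C address in 8-bit (write) form, while the Linux I2C core uses 7-bit addresses -- hence the ">> 1" applied both to the log message and to i2c_board_info.addr. Illustrated with a made-up value:

	u8 atom_addr = 0x5c;		/* 8-bit address as found in the ATOM table */
	u8 i2c_addr  = atom_addr >> 1;	/* 0x2e, the 7-bit form i2c_board_info.addr expects */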
2046 | /* add the i2c bus for thermal/fan chip */ | 2076 | static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) |
2047 | if (controller->ucType > 0) { | 2077 | { |
2048 | if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { | 2078 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
2049 | DRM_INFO("Internal thermal controller %s fan control\n", | 2079 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
2050 | (controller->ucFanParameters & | 2080 | u8 frev, crev; |
2051 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 2081 | u16 data_offset; |
2052 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; | 2082 | union firmware_info *firmware_info; |
2053 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { | 2083 | u16 vddc = 0; |
2054 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2055 | (controller->ucFanParameters & | ||
2056 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2057 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; | ||
2058 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { | ||
2059 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2060 | (controller->ucFanParameters & | ||
2061 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2062 | rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; | ||
2063 | } else if ((controller->ucType == | ||
2064 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | ||
2065 | (controller->ucType == | ||
2066 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { | ||
2067 | DRM_INFO("Special thermal controller config\n"); | ||
2068 | } else { | ||
2069 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | ||
2070 | pp_lib_thermal_controller_names[controller->ucType], | ||
2071 | controller->ucI2cAddress >> 1, | ||
2072 | (controller->ucFanParameters & | ||
2073 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2074 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); | ||
2075 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | ||
2076 | if (rdev->pm.i2c_bus) { | ||
2077 | struct i2c_board_info info = { }; | ||
2078 | const char *name = pp_lib_thermal_controller_names[controller->ucType]; | ||
2079 | info.addr = controller->ucI2cAddress >> 1; | ||
2080 | strlcpy(info.type, name, sizeof(info.type)); | ||
2081 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
2082 | } | ||
2083 | 2084 | ||
2084 | } | 2085 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
2085 | } | 2086 | &frev, &crev, &data_offset)) { |
2086 | /* first mode is usually default, followed by low to high */ | 2087 | firmware_info = |
2087 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { | 2088 | (union firmware_info *)(mode_info->atom_context->bios + |
2088 | mode_index = 0; | 2089 | data_offset); |
2089 | power_state = (struct _ATOM_PPLIB_STATE *) | 2090 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; |
2090 | (mode_info->atom_context->bios + | 2091 | } |
2091 | data_offset + | 2092 | |
2092 | le16_to_cpu(power_info->info_4.usStateArrayOffset) + | 2093 | return vddc; |
2093 | i * power_info->info_4.ucStateEntrySize); | 2094 | } |
2094 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | 2095 | |
2095 | (mode_info->atom_context->bios + | 2096 | static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, |
2096 | data_offset + | 2097 | int state_index, int mode_index, |
2097 | le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) + | 2098 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info) |
2098 | (power_state->ucNonClockStateIndex * | 2099 | { |
2099 | power_info->info_4.ucNonClockSize)); | 2100 | int j; |
2100 | for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) { | 2101 | u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
2101 | if (rdev->flags & RADEON_IS_IGP) { | 2102 | u32 misc2 = le16_to_cpu(non_clock_info->usClassification); |
2102 | struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info = | 2103 | u16 vddc = radeon_atombios_get_default_vddc(rdev); |
2103 | (struct _ATOM_PPLIB_RS780_CLOCK_INFO *) | 2104 | |
2104 | (mode_info->atom_context->bios + | 2105 | rdev->pm.power_state[state_index].misc = misc; |
2105 | data_offset + | 2106 | rdev->pm.power_state[state_index].misc2 = misc2; |
2106 | le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + | 2107 | rdev->pm.power_state[state_index].pcie_lanes = |
2107 | (power_state->ucClockStateIndices[j] * | 2108 | ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> |
2108 | power_info->info_4.ucClockInfoSize)); | 2109 | ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; |
2109 | sclk = le16_to_cpu(clock_info->usLowEngineClockLow); | 2110 | switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { |
2110 | sclk |= clock_info->ucLowEngineClockHigh << 16; | 2111 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: |
2111 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | 2112 | rdev->pm.power_state[state_index].type = |
2112 | /* skip invalid modes */ | 2113 | POWER_STATE_TYPE_BATTERY; |
2113 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) | 2114 | break; |
2114 | continue; | 2115 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: |
2115 | /* voltage works differently on IGPs */ | 2116 | rdev->pm.power_state[state_index].type = |
2116 | mode_index++; | 2117 | POWER_STATE_TYPE_BALANCED; |
2117 | } else if (ASIC_IS_DCE4(rdev)) { | 2118 | break; |
2118 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = | 2119 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: |
2119 | (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) | 2120 | rdev->pm.power_state[state_index].type = |
2120 | (mode_info->atom_context->bios + | 2121 | POWER_STATE_TYPE_PERFORMANCE; |
2121 | data_offset + | 2122 | break; |
2122 | le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + | 2123 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: |
2123 | (power_state->ucClockStateIndices[j] * | 2124 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) |
2124 | power_info->info_4.ucClockInfoSize)); | 2125 | rdev->pm.power_state[state_index].type = |
2125 | sclk = le16_to_cpu(clock_info->usEngineClockLow); | 2126 | POWER_STATE_TYPE_PERFORMANCE; |
2126 | sclk |= clock_info->ucEngineClockHigh << 16; | 2127 | break; |
2127 | mclk = le16_to_cpu(clock_info->usMemoryClockLow); | 2128 | } |
2128 | mclk |= clock_info->ucMemoryClockHigh << 16; | 2129 | rdev->pm.power_state[state_index].flags = 0; |
2129 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; | 2130 | if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) |
2130 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | 2131 | rdev->pm.power_state[state_index].flags |= |
2131 | /* skip invalid modes */ | 2132 | RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; |
2132 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | 2133 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { |
2133 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | 2134 | rdev->pm.power_state[state_index].type = |
2134 | continue; | 2135 | POWER_STATE_TYPE_DEFAULT; |
2135 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2136 | rdev->pm.default_power_state_index = state_index; |
2136 | VOLTAGE_SW; | 2137 | rdev->pm.power_state[state_index].default_clock_mode = |
2137 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2138 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
2138 | clock_info->usVDDC; | 2139 | /* patch the table values with the default sclk/mclk from firmware info */ |
2139 | /* XXX usVDDCI */ | 2140 | for (j = 0; j < mode_index; j++) { |
2140 | mode_index++; | 2141 | rdev->pm.power_state[state_index].clock_info[j].mclk = |
2141 | } else { | 2142 | rdev->clock.default_mclk; |
2142 | struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = | 2143 | rdev->pm.power_state[state_index].clock_info[j].sclk = |
2143 | (struct _ATOM_PPLIB_R600_CLOCK_INFO *) | 2144 | rdev->clock.default_sclk; |
2144 | (mode_info->atom_context->bios + | 2145 | if (vddc) |
2145 | data_offset + | 2146 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = |
2146 | le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + | 2147 | vddc; |
2147 | (power_state->ucClockStateIndices[j] * | 2148 | } |
2148 | power_info->info_4.ucClockInfoSize)); | 2149 | } |
2149 | sclk = le16_to_cpu(clock_info->usEngineClockLow); | 2150 | } |
2150 | sclk |= clock_info->ucEngineClockHigh << 16; | 2151 | |
2151 | mclk = le16_to_cpu(clock_info->usMemoryClockLow); | 2152 | static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, |
2152 | mclk |= clock_info->ucMemoryClockHigh << 16; | 2153 | int state_index, int mode_index, |
2153 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; | 2154 | union pplib_clock_info *clock_info) |
2154 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | 2155 | { |
2155 | /* skip invalid modes */ | 2156 | u32 sclk, mclk; |
2156 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | 2157 | |
2157 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | 2158 | if (rdev->flags & RADEON_IS_IGP) { |
2158 | continue; | 2159 | if (rdev->family >= CHIP_PALM) { |
2159 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2160 | sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); |
2160 | VOLTAGE_SW; | 2161 | sclk |= clock_info->sumo.ucEngineClockHigh << 16; |
2161 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2162 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; |
2162 | clock_info->usVDDC; | 2163 | } else { |
2163 | mode_index++; | 2164 | sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow); |
2164 | } | 2165 | sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; |
2165 | } | 2166 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; |
2166 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | 2167 | } |
2167 | if (mode_index) { | 2168 | } else if (ASIC_IS_DCE4(rdev)) { |
2168 | misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); | 2169 | sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); |
2169 | misc2 = le16_to_cpu(non_clock_info->usClassification); | 2170 | sclk |= clock_info->evergreen.ucEngineClockHigh << 16; |
2170 | rdev->pm.power_state[state_index].misc = misc; | 2171 | mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); |
2171 | rdev->pm.power_state[state_index].misc2 = misc2; | 2172 | mclk |= clock_info->evergreen.ucMemoryClockHigh << 16; |
2172 | rdev->pm.power_state[state_index].pcie_lanes = | 2173 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; |
2173 | ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> | 2174 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; |
2174 | ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; | 2175 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
2175 | switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { | 2176 | VOLTAGE_SW; |
2176 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: | 2177 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
2177 | rdev->pm.power_state[state_index].type = | 2178 | clock_info->evergreen.usVDDC; |
2178 | POWER_STATE_TYPE_BATTERY; | 2179 | } else { |
2179 | break; | 2180 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
2180 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: | 2181 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
2181 | rdev->pm.power_state[state_index].type = | 2182 | mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow); |
2182 | POWER_STATE_TYPE_BALANCED; | 2183 | mclk |= clock_info->r600.ucMemoryClockHigh << 16; |
2183 | break; | 2184 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; |
2184 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: | 2185 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; |
2185 | rdev->pm.power_state[state_index].type = | 2186 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
2186 | POWER_STATE_TYPE_PERFORMANCE; | 2187 | VOLTAGE_SW; |
2187 | break; | 2188 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
2188 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: | 2189 | clock_info->r600.usVDDC; |
2189 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) | 2190 | } |
2190 | rdev->pm.power_state[state_index].type = | 2191 | |
2191 | POWER_STATE_TYPE_PERFORMANCE; | 2192 | if (rdev->flags & RADEON_IS_IGP) { |
2192 | break; | 2193 | /* skip invalid modes */ |
2193 | } | 2194 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) |
2194 | rdev->pm.power_state[state_index].flags = 0; | 2195 | return false; |
2195 | if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) | 2196 | } else { |
2196 | rdev->pm.power_state[state_index].flags |= | 2197 | /* skip invalid modes */ |
2197 | RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | 2198 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || |
2198 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { | 2199 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) |
2199 | rdev->pm.power_state[state_index].type = | 2200 | return false; |
2200 | POWER_STATE_TYPE_DEFAULT; | 2201 | } |
2201 | rdev->pm.default_power_state_index = state_index; | 2202 | return true; |
2202 | rdev->pm.power_state[state_index].default_clock_mode = | 2203 | } |
2203 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 2204 | |
2204 | /* patch the table values with the default sclk/mclk from firmware info */ | 2205 | static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) |
2205 | for (j = 0; j < mode_index; j++) { | 2206 | { |
2206 | rdev->pm.power_state[state_index].clock_info[j].mclk = | 2207 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
2207 | rdev->clock.default_mclk; | 2208 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; |
2208 | rdev->pm.power_state[state_index].clock_info[j].sclk = | 2209 | union pplib_power_state *power_state; |
2209 | rdev->clock.default_sclk; | 2210 | int i, j; |
2210 | if (vddc) | 2211 | int state_index = 0, mode_index = 0; |
2211 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = | 2212 | union pplib_clock_info *clock_info; |
2212 | vddc; | 2213 | bool valid; |
2213 | } | 2214 | union power_info *power_info; |
2214 | } | 2215 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
2215 | state_index++; | 2216 | u16 data_offset; |
2216 | } | 2217 | u8 frev, crev; |
2217 | } | 2218 | |
2218 | /* if multiple clock modes, mark the lowest as no display */ | 2219 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
2219 | for (i = 0; i < state_index; i++) { | 2220 | &frev, &crev, &data_offset)) |
2220 | if (rdev->pm.power_state[i].num_clock_modes > 1) | 2221 | return state_index; |
2221 | rdev->pm.power_state[i].clock_info[0].flags |= | 2222 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
2222 | RADEON_PM_MODE_NO_DISPLAY; | 2223 | |
2223 | } | 2224 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); |
2224 | /* first mode is usually default */ | 2225 | /* first mode is usually default, followed by low to high */ |
2225 | if (rdev->pm.default_power_state_index == -1) { | 2226 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
2226 | rdev->pm.power_state[0].type = | 2227 | mode_index = 0; |
2227 | POWER_STATE_TYPE_DEFAULT; | 2228 | power_state = (union pplib_power_state *) |
2228 | rdev->pm.default_power_state_index = 0; | 2229 | (mode_info->atom_context->bios + data_offset + |
2229 | rdev->pm.power_state[0].default_clock_mode = | 2230 | le16_to_cpu(power_info->pplib.usStateArrayOffset) + |
2230 | &rdev->pm.power_state[0].clock_info[0]; | 2231 | i * power_info->pplib.ucStateEntrySize); |
2231 | } | 2232 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
2233 | (mode_info->atom_context->bios + data_offset + | ||
2234 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + | ||
2235 | (power_state->v1.ucNonClockStateIndex * | ||
2236 | power_info->pplib.ucNonClockSize)); | ||
2237 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { | ||
2238 | clock_info = (union pplib_clock_info *) | ||
2239 | (mode_info->atom_context->bios + data_offset + | ||
2240 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + | ||
2241 | (power_state->v1.ucClockStateIndices[j] * | ||
2242 | power_info->pplib.ucClockInfoSize)); | ||
2243 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2244 | state_index, mode_index, | ||
2245 | clock_info); | ||
2246 | if (valid) | ||
2247 | mode_index++; | ||
2248 | } | ||
2249 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | ||
2250 | if (mode_index) { | ||
2251 | radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, | ||
2252 | non_clock_info); | ||
2253 | state_index++; | ||
2254 | } | ||
2255 | } | ||
2256 | /* if multiple clock modes, mark the lowest as no display */ | ||
2257 | for (i = 0; i < state_index; i++) { | ||
2258 | if (rdev->pm.power_state[i].num_clock_modes > 1) | ||
2259 | rdev->pm.power_state[i].clock_info[0].flags |= | ||
2260 | RADEON_PM_MODE_NO_DISPLAY; | ||
2261 | } | ||
2262 | /* first mode is usually default */ | ||
2263 | if (rdev->pm.default_power_state_index == -1) { | ||
2264 | rdev->pm.power_state[0].type = | ||
2265 | POWER_STATE_TYPE_DEFAULT; | ||
2266 | rdev->pm.default_power_state_index = 0; | ||
2267 | rdev->pm.power_state[0].default_clock_mode = | ||
2268 | &rdev->pm.power_state[0].clock_info[0]; | ||
2269 | } | ||
2270 | return state_index; | ||
2271 | } | ||
2272 | |||
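All of these parsers read clocks the same way: the tables split each value into a little-endian 16-bit low word plus an 8-bit high byte, giving a 24-bit quantity. A standalone sketch of the composition (field names follow the r600 variant used above; the result is in the table's native units, 10 kHz on these parts to the best of my reading):

	u32 sclk;

	sclk  = le16_to_cpu(clock_info->r600.usEngineClockLow);	/* bits 0..15 */
	sclk |= clock_info->r600.ucEngineClockHigh << 16;	/* bits 16..23 */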
2273 | static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) | ||
2274 | { | ||
2275 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
2276 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
2277 | union pplib_power_state *power_state; | ||
2278 | int i, j, non_clock_array_index, clock_array_index; | ||
2279 | int state_index = 0, mode_index = 0; | ||
2280 | union pplib_clock_info *clock_info; | ||
2281 | struct StateArray *state_array; | ||
2282 | struct ClockInfoArray *clock_info_array; | ||
2283 | struct NonClockInfoArray *non_clock_info_array; | ||
2284 | bool valid; | ||
2285 | union power_info *power_info; | ||
2286 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
2287 | u16 data_offset; | ||
2288 | u8 frev, crev; | ||
2289 | |||
2290 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
2291 | &frev, &crev, &data_offset)) | ||
2292 | return state_index; | ||
2293 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
2294 | |||
2295 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | ||
2296 | state_array = (struct StateArray *) | ||
2297 | (mode_info->atom_context->bios + data_offset + | ||
2298 | power_info->pplib.usStateArrayOffset); | ||
2299 | clock_info_array = (struct ClockInfoArray *) | ||
2300 | (mode_info->atom_context->bios + data_offset + | ||
2301 | power_info->pplib.usClockInfoArrayOffset); | ||
2302 | non_clock_info_array = (struct NonClockInfoArray *) | ||
2303 | (mode_info->atom_context->bios + data_offset + | ||
2304 | power_info->pplib.usNonClockInfoArrayOffset); | ||
2305 | for (i = 0; i < state_array->ucNumEntries; i++) { | ||
2306 | mode_index = 0; | ||
2307 | power_state = (union pplib_power_state *)&state_array->states[i]; | ||
2308 | /* XXX this might be an inagua bug... */ | ||
2309 | non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ | ||
2310 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | ||
2311 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; | ||
2312 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { | ||
2313 | clock_array_index = power_state->v2.clockInfoIndex[j]; | ||
2314 | /* XXX this might be an inagua bug... */ | ||
2315 | if (clock_array_index >= clock_info_array->ucNumEntries) | ||
2316 | continue; | ||
2317 | clock_info = (union pplib_clock_info *) | ||
2318 | &clock_info_array->clockInfo[clock_array_index]; | ||
2319 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2320 | state_index, mode_index, | ||
2321 | clock_info); | ||
2322 | if (valid) | ||
2323 | mode_index++; | ||
2324 | } | ||
2325 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | ||
2326 | if (mode_index) { | ||
2327 | radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, | ||
2328 | non_clock_info); | ||
2329 | state_index++; | ||
2330 | } | ||
2331 | } | ||
2332 | /* if multiple clock modes, mark the lowest as no display */ | ||
2333 | for (i = 0; i < state_index; i++) { | ||
2334 | if (rdev->pm.power_state[i].num_clock_modes > 1) | ||
2335 | rdev->pm.power_state[i].clock_info[0].flags |= | ||
2336 | RADEON_PM_MODE_NO_DISPLAY; | ||
2337 | } | ||
2338 | /* first mode is usually default */ | ||
2339 | if (rdev->pm.default_power_state_index == -1) { | ||
2340 | rdev->pm.power_state[0].type = | ||
2341 | POWER_STATE_TYPE_DEFAULT; | ||
2342 | rdev->pm.default_power_state_index = 0; | ||
2343 | rdev->pm.power_state[0].default_clock_mode = | ||
2344 | &rdev->pm.power_state[0].clock_info[0]; | ||
2345 | } | ||
2346 | return state_index; | ||
2347 | } | ||
2348 | |||
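The table-6 walk differs from 4/5 in its indirection: a state no longer embeds its clock entries but carries ucNumDPMLevels plus an index array into a shared ClockInfoArray, with the non-clock data in a third array. A rough layout sketch inferred purely from the accesses above (see atombios.h for the authoritative definitions; names and packing here are approximations):

	/* sketch only */
	struct pplib_state_v2_sketch {
		u8 ucNumDPMLevels;
		u8 nonClockInfoIndex;	/* the inagua workaround above uses i instead */
		u8 clockInfoIndex[];	/* ucNumDPMLevels indices into ClockInfoArray */
	};

	struct clock_info_array_sketch {
		u8 ucNumEntries;
		u8 ucEntrySize;
		u8 clockInfo[];		/* ucNumEntries packed clock-info entries */
	};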
2349 | void radeon_atombios_get_power_modes(struct radeon_device *rdev) | ||
2350 | { | ||
2351 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
2352 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
2353 | u16 data_offset; | ||
2354 | u8 frev, crev; | ||
2355 | int state_index = 0; | ||
2356 | |||
2357 | rdev->pm.default_power_state_index = -1; | ||
2358 | |||
2359 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
2360 | &frev, &crev, &data_offset)) { | ||
2361 | switch (frev) { | ||
2362 | case 1: | ||
2363 | case 2: | ||
2364 | case 3: | ||
2365 | state_index = radeon_atombios_parse_power_table_1_3(rdev); | ||
2366 | break; | ||
2367 | case 4: | ||
2368 | case 5: | ||
2369 | state_index = radeon_atombios_parse_power_table_4_5(rdev); | ||
2370 | break; | ||
2371 | case 6: | ||
2372 | state_index = radeon_atombios_parse_power_table_6(rdev); | ||
2373 | break; | ||
2374 | default: | ||
2375 | break; | ||
2232 | } | 2376 | } |
2233 | } else { | 2377 | } else { |
2234 | /* add the default mode */ | 2378 | /* add the default mode */ |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 6d64a2705f12..35b5eb8fbe2a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
77 | p->relocs_ptr[i] = &p->relocs[i]; | 77 | p->relocs_ptr[i] = &p->relocs[i]; |
78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; | 78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; |
79 | p->relocs[i].lobj.bo = p->relocs[i].robj; | 79 | p->relocs[i].lobj.bo = p->relocs[i].robj; |
80 | p->relocs[i].lobj.rdomain = r->read_domains; | ||
81 | p->relocs[i].lobj.wdomain = r->write_domain; | 80 | p->relocs[i].lobj.wdomain = r->write_domain; |
81 | p->relocs[i].lobj.rdomain = r->read_domains; | ||
82 | p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; | ||
82 | p->relocs[i].handle = r->handle; | 83 | p->relocs[i].handle = r->handle; |
83 | p->relocs[i].flags = r->flags; | 84 | p->relocs[i].flags = r->flags; |
84 | INIT_LIST_HEAD(&p->relocs[i].lobj.list); | ||
85 | radeon_bo_list_add_object(&p->relocs[i].lobj, | 85 | radeon_bo_list_add_object(&p->relocs[i].lobj, |
86 | &p->validated); | 86 | &p->validated); |
87 | } | 87 | } |
88 | } | 88 | } |
89 | return radeon_bo_list_validate(&p->validated); | 89 | return radeon_bo_list_validate(&p->validated); |
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
189 | { | 189 | { |
190 | unsigned i; | 190 | unsigned i; |
191 | 191 | ||
192 | if (!error && parser->ib) { | 192 | |
193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); | 193 | if (!error && parser->ib) |
194 | } | 194 | ttm_eu_fence_buffer_objects(&parser->validated, |
195 | radeon_bo_list_unreserve(&parser->validated); | 195 | parser->ib->fence); |
196 | else | ||
197 | ttm_eu_backoff_reservation(&parser->validated); | ||
198 | |||
196 | if (parser->relocs != NULL) { | 199 | if (parser->relocs != NULL) { |
197 | for (i = 0; i < parser->nrelocs; i++) { | 200 | for (i = 0; i < parser->nrelocs; i++) { |
198 | if (parser->relocs[i].gobj) | 201 | if (parser->relocs[i].gobj) |
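This hunk, together with the new lobj.tv.bo hookup in the previous one, moves the CS parser from radeon's private list helpers onto the shared TTM execbuf utilities. The expected caller lifecycle, as a hedged sketch of the pattern rather than radeon's exact sequence:

	int ret;

	/* reserve every BO on the validation list (handles backoff on contention) */
	ret = ttm_eu_reserve_buffers(&list);
	if (ret)
		return ret;

	/* ... ret = validate placements, build and submit the command stream ... */

	if (!ret)
		/* success: attach the submission fence and unreserve in one pass */
		ttm_eu_fence_buffer_objects(&list, fence);
	else
		/* failure: just drop the reservations */
		ttm_eu_backoff_reservation(&list);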
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e12e79326cb1..86660cb425ab 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -81,6 +81,7 @@ static const char radeon_family_name[][16] = { | |||
81 | "JUNIPER", | 81 | "JUNIPER", |
82 | "CYPRESS", | 82 | "CYPRESS", |
83 | "HEMLOCK", | 83 | "HEMLOCK", |
84 | "PALM", | ||
84 | "LAST", | 85 | "LAST", |
85 | }; | 86 | }; |
86 | 87 | ||
@@ -335,7 +336,12 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
335 | uint32_t reg; | 336 | uint32_t reg; |
336 | 337 | ||
337 | /* first check CRTCs */ | 338 | /* first check CRTCs */ |
338 | if (ASIC_IS_DCE4(rdev)) { | 339 | if (ASIC_IS_DCE41(rdev)) { |
340 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | ||
341 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
342 | if (reg & EVERGREEN_CRTC_MASTER_EN) | ||
343 | return true; | ||
344 | } else if (ASIC_IS_DCE4(rdev)) { | ||
339 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | 345 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
340 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | | 346 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | |
341 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | 347 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1df4dc6c063c..7b17e639ab32 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -183,12 +183,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) | |||
183 | kfree(radeon_crtc); | 183 | kfree(radeon_crtc); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* | ||
187 | * Handle unpin events outside the interrupt handler proper. | ||
188 | */ | ||
189 | static void radeon_unpin_work_func(struct work_struct *__work) | ||
190 | { | ||
191 | struct radeon_unpin_work *work = | ||
192 | container_of(__work, struct radeon_unpin_work, work); | ||
193 | int r; | ||
194 | |||
195 | /* unpin of the old buffer */ | ||
196 | r = radeon_bo_reserve(work->old_rbo, false); | ||
197 | if (likely(r == 0)) { | ||
198 | r = radeon_bo_unpin(work->old_rbo); | ||
199 | if (unlikely(r != 0)) { | ||
200 | DRM_ERROR("failed to unpin buffer after flip\n"); | ||
201 | } | ||
202 | radeon_bo_unreserve(work->old_rbo); | ||
203 | } else | ||
204 | DRM_ERROR("failed to reserve buffer after flip\n"); | ||
205 | kfree(work); | ||
206 | } | ||
207 | |||
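The detour through a work item exists because the completion path below is driven from the vblank interrupt, while radeon_bo_reserve() can sleep waiting for the reservation -- so the unpin has to be bounced to process context. Reduced to its essentials:

	/* vblank IRQ path: may not sleep, so never reserve/unpin the BO here */
	schedule_work(&work->work);	/* IRQ-safe */

	/* kworker context: radeon_unpin_work_func() may now sleep in
	 * radeon_bo_reserve() before unpinning the old scanout buffer */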
208 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) | ||
209 | { | ||
210 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
211 | struct radeon_unpin_work *work; | ||
212 | struct drm_pending_vblank_event *e; | ||
213 | struct timeval now; | ||
214 | unsigned long flags; | ||
215 | u32 update_pending; | ||
216 | int vpos, hpos; | ||
217 | |||
218 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); | ||
219 | work = radeon_crtc->unpin_work; | ||
220 | if (work == NULL || | ||
221 | !radeon_fence_signaled(work->fence)) { | ||
222 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | ||
223 | return; | ||
224 | } | ||
225 | /* New pageflip, or just completion of a previous one? */ | ||
226 | if (!radeon_crtc->deferred_flip_completion) { | ||
227 | /* do the flip (mmio) */ | ||
228 | update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); | ||
229 | } else { | ||
230 | /* This is just the completion of a flip queued in the crtc | ||
231 | * on the last invocation. Make sure we go directly to the | ||
232 | * completion routine. | ||
233 | */ | ||
234 | update_pending = 0; | ||
235 | radeon_crtc->deferred_flip_completion = 0; | ||
236 | } | ||
237 | |||
238 | /* Has the pageflip already completed in crtc, or is it certain | ||
239 | * to complete in this vblank? | ||
240 | */ | ||
241 | if (update_pending && | ||
242 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, | ||
243 | &vpos, &hpos)) && | ||
244 | (vpos >= 0) && | ||
245 | (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) { | ||
246 | /* crtc didn't flip in this target vblank interval, | ||
247 | * but a flip is pending in the crtc. It will complete | ||
248 | * in the next vblank interval, so complete the flip at | ||
249 | * the next vblank irq. | ||
250 | */ | ||
251 | radeon_crtc->deferred_flip_completion = 1; | ||
252 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | ||
253 | return; | ||
254 | } | ||
255 | |||
256 | /* Pageflip (will be) certainly completed in this vblank. Clean up. */ | ||
257 | radeon_crtc->unpin_work = NULL; | ||
258 | |||
259 | /* wakeup userspace */ | ||
260 | if (work->event) { | ||
261 | e = work->event; | ||
262 | e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); | ||
263 | e->event.tv_sec = now.tv_sec; | ||
264 | e->event.tv_usec = now.tv_usec; | ||
265 | list_add_tail(&e->base.link, &e->base.file_priv->event_list); | ||
266 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
267 | } | ||
268 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | ||
269 | |||
270 | drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); | ||
271 | radeon_fence_unref(&work->fence); | ||
272 | radeon_post_page_flip(work->rdev, work->crtc_id); | ||
273 | schedule_work(&work->work); | ||
274 | } | ||
275 | |||
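The scanout-position test above is a heuristic: if update_pending is still set while the beam sits within the first ~99% of the active area, the flip missed this vblank's latch point and will only take effect on the next one. Worked through with an assumed 1080-line mode:

	int vdisplay  = 1080;			/* hwmode.crtc_vdisplay */
	int threshold = (99 * vdisplay) / 100;	/* 1069 */

	/* vpos in [0, 1069): mid-frame -> defer completion to the next vblank irq.
	 * vpos < 0 (in vblank) or vpos >= 1069: treat the flip as completing now. */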
276 | static int radeon_crtc_page_flip(struct drm_crtc *crtc, | ||
277 | struct drm_framebuffer *fb, | ||
278 | struct drm_pending_vblank_event *event) | ||
279 | { | ||
280 | struct drm_device *dev = crtc->dev; | ||
281 | struct radeon_device *rdev = dev->dev_private; | ||
282 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
283 | struct radeon_framebuffer *old_radeon_fb; | ||
284 | struct radeon_framebuffer *new_radeon_fb; | ||
285 | struct drm_gem_object *obj; | ||
286 | struct radeon_bo *rbo; | ||
287 | struct radeon_fence *fence; | ||
288 | struct radeon_unpin_work *work; | ||
289 | unsigned long flags; | ||
290 | u32 tiling_flags, pitch_pixels; | ||
291 | u64 base; | ||
292 | int r; | ||
293 | |||
294 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
295 | if (work == NULL) | ||
296 | return -ENOMEM; | ||
297 | |||
298 | r = radeon_fence_create(rdev, &fence); | ||
299 | if (unlikely(r != 0)) { | ||
300 | kfree(work); | ||
301 | DRM_ERROR("flip queue: failed to create fence.\n"); | ||
302 | return -ENOMEM; | ||
303 | } | ||
304 | work->event = event; | ||
305 | work->rdev = rdev; | ||
306 | work->crtc_id = radeon_crtc->crtc_id; | ||
307 | work->fence = radeon_fence_ref(fence); | ||
308 | old_radeon_fb = to_radeon_framebuffer(crtc->fb); | ||
309 | new_radeon_fb = to_radeon_framebuffer(fb); | ||
310 | /* schedule unpin of the old buffer */ | ||
311 | obj = old_radeon_fb->obj; | ||
312 | rbo = obj->driver_private; | ||
313 | work->old_rbo = rbo; | ||
314 | INIT_WORK(&work->work, radeon_unpin_work_func); | ||
315 | |||
316 | /* We borrow the event spin lock for protecting unpin_work */ | ||
317 | spin_lock_irqsave(&dev->event_lock, flags); | ||
318 | if (radeon_crtc->unpin_work) { | ||
319 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
320 | kfree(work); | ||
321 | radeon_fence_unref(&fence); | ||
322 | |||
323 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
324 | return -EBUSY; | ||
325 | } | ||
326 | radeon_crtc->unpin_work = work; | ||
327 | radeon_crtc->deferred_flip_completion = 0; | ||
328 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
329 | |||
330 | /* pin the new buffer */ | ||
331 | obj = new_radeon_fb->obj; | ||
332 | rbo = obj->driver_private; | ||
333 | |||
334 | DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", | ||
335 | work->old_rbo, rbo); | ||
336 | |||
337 | r = radeon_bo_reserve(rbo, false); | ||
338 | if (unlikely(r != 0)) { | ||
339 | DRM_ERROR("failed to reserve new rbo buffer before flip\n"); | ||
340 | goto pflip_cleanup; | ||
341 | } | ||
342 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); | ||
343 | if (unlikely(r != 0)) { | ||
344 | radeon_bo_unreserve(rbo); | ||
345 | r = -EINVAL; | ||
346 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); | ||
347 | goto pflip_cleanup; | ||
348 | } | ||
349 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | ||
350 | radeon_bo_unreserve(rbo); | ||
351 | |||
352 | if (!ASIC_IS_AVIVO(rdev)) { | ||
353 | /* crtc offset is from display base addr not FB location */ | ||
354 | base -= radeon_crtc->legacy_display_base_addr; | ||
355 | pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8); | ||
356 | |||
357 | if (tiling_flags & RADEON_TILING_MACRO) { | ||
358 | if (ASIC_IS_R300(rdev)) { | ||
359 | base &= ~0x7ff; | ||
360 | } else { | ||
361 | int byteshift = fb->bits_per_pixel >> 4; | ||
362 | int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; | ||
363 | base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); | ||
364 | } | ||
365 | } else { | ||
366 | int offset = crtc->y * pitch_pixels + crtc->x; | ||
367 | switch (fb->bits_per_pixel) { | ||
368 | case 8: | ||
369 | default: | ||
370 | offset *= 1; | ||
371 | break; | ||
372 | case 15: | ||
373 | case 16: | ||
374 | offset *= 2; | ||
375 | break; | ||
376 | case 24: | ||
377 | offset *= 3; | ||
378 | break; | ||
379 | case 32: | ||
380 | offset *= 4; | ||
381 | break; | ||
382 | } | ||
383 | base += offset; | ||
384 | } | ||
385 | base &= ~7; | ||
386 | } | ||
387 | |||
388 | spin_lock_irqsave(&dev->event_lock, flags); | ||
389 | work->new_crtc_base = base; | ||
390 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
391 | |||
392 | /* update crtc fb */ | ||
393 | crtc->fb = fb; | ||
394 | |||
395 | r = drm_vblank_get(dev, radeon_crtc->crtc_id); | ||
396 | if (r) { | ||
397 | DRM_ERROR("failed to get vblank before flip\n"); | ||
398 | goto pflip_cleanup1; | ||
399 | } | ||
400 | |||
401 | /* 32 ought to cover us */ | ||
402 | r = radeon_ring_lock(rdev, 32); | ||
403 | if (r) { | ||
404 | DRM_ERROR("failed to lock the ring before flip\n"); | ||
405 | goto pflip_cleanup2; | ||
406 | } | ||
407 | |||
408 | /* emit the fence */ | ||
409 | radeon_fence_emit(rdev, fence); | ||
410 | /* set the proper interrupt */ | ||
411 | radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); | ||
412 | /* fire the ring */ | ||
413 | radeon_ring_unlock_commit(rdev); | ||
414 | |||
415 | return 0; | ||
416 | |||
417 | pflip_cleanup2: | ||
418 | drm_vblank_put(dev, radeon_crtc->crtc_id); | ||
419 | |||
420 | pflip_cleanup1: | ||
421 | r = radeon_bo_reserve(rbo, false); | ||
422 | if (unlikely(r != 0)) { | ||
423 | DRM_ERROR("failed to reserve new rbo in error path\n"); | ||
424 | goto pflip_cleanup; | ||
425 | } | ||
426 | r = radeon_bo_unpin(rbo); | ||
427 | if (unlikely(r != 0)) { | ||
428 | radeon_bo_unreserve(rbo); | ||
429 | r = -EINVAL; | ||
430 | DRM_ERROR("failed to unpin new rbo in error path\n"); | ||
431 | goto pflip_cleanup; | ||
432 | } | ||
433 | radeon_bo_unreserve(rbo); | ||
434 | |||
435 | pflip_cleanup: | ||
436 | spin_lock_irqsave(&dev->event_lock, flags); | ||
437 | radeon_crtc->unpin_work = NULL; | ||
438 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
439 | radeon_fence_unref(&fence); | ||
440 | kfree(work); | ||
441 | |||
442 | return r; | ||
443 | } | ||
444 | |||
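With .page_flip wired up in radeon_crtc_funcs below, userspace reaches this path through the generic DRM page-flip ioctl. A minimal sketch using the libdrm wrapper (fd, crtc_id and fb_id are assumed to come from earlier modeset code; error handling omitted):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* queue a flip of crtc_id to fb_id and request a completion event */
	static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
	{
		return drmModePageFlip(fd, crtc_id, fb_id,
				       DRM_MODE_PAGE_FLIP_EVENT, NULL /* user_data */);
	}

	/* the vblank-stamped completion then arrives on fd; collect it with
	 * drmHandleEvent() and a drmEventContext whose page_flip_handler is set */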
186 | static const struct drm_crtc_funcs radeon_crtc_funcs = { | 445 | static const struct drm_crtc_funcs radeon_crtc_funcs = { |
187 | .cursor_set = radeon_crtc_cursor_set, | 446 | .cursor_set = radeon_crtc_cursor_set, |
188 | .cursor_move = radeon_crtc_cursor_move, | 447 | .cursor_move = radeon_crtc_cursor_move, |
189 | .gamma_set = radeon_crtc_gamma_set, | 448 | .gamma_set = radeon_crtc_gamma_set, |
190 | .set_config = drm_crtc_helper_set_config, | 449 | .set_config = drm_crtc_helper_set_config, |
191 | .destroy = radeon_crtc_destroy, | 450 | .destroy = radeon_crtc_destroy, |
451 | .page_flip = radeon_crtc_page_flip, | ||
192 | }; | 452 | }; |
193 | 453 | ||
194 | static void radeon_crtc_init(struct drm_device *dev, int index) | 454 | static void radeon_crtc_init(struct drm_device *dev, int index) |
@@ -225,7 +485,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
225 | radeon_legacy_init_crtc(dev, radeon_crtc); | 485 | radeon_legacy_init_crtc(dev, radeon_crtc); |
226 | } | 486 | } |
227 | 487 | ||
228 | static const char *encoder_names[34] = { | 488 | static const char *encoder_names[36] = { |
229 | "NONE", | 489 | "NONE", |
230 | "INTERNAL_LVDS", | 490 | "INTERNAL_LVDS", |
231 | "INTERNAL_TMDS1", | 491 | "INTERNAL_TMDS1", |
@@ -260,6 +520,8 @@ static const char *encoder_names[34] = { | |||
260 | "INTERNAL_KLDSCP_LVTMA", | 520 | "INTERNAL_KLDSCP_LVTMA", |
261 | "INTERNAL_UNIPHY1", | 521 | "INTERNAL_UNIPHY1", |
262 | "INTERNAL_UNIPHY2", | 522 | "INTERNAL_UNIPHY2", |
523 | "NUTMEG", | ||
524 | "TRAVIS", | ||
263 | }; | 525 | }; |
264 | 526 | ||
265 | static const char *connector_names[15] = { | 527 | static const char *connector_names[15] = { |
@@ -1019,7 +1281,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1019 | /* | 1281 | /* |
1020 | * Retrieve current video scanout position of crtc on a given gpu. | 1282 | * Retrieve current video scanout position of crtc on a given gpu. |
1021 | * | 1283 | * |
1022 | * \param rdev Device to query. | 1284 | * \param dev Device to query. |
1023 | * \param crtc Crtc to query. | 1285 | * \param crtc Crtc to query. |
1024 | * \param *vpos Location where vertical scanout position should be stored. | 1286 | * \param *vpos Location where vertical scanout position should be stored. |
1025 | * \param *hpos Location where horizontal scanout position should go. | 1287 | * \param *hpos Location where horizontal scanout position should go. |
@@ -1031,72 +1293,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1031 | * | 1293 | * |
1032 | * \return Flags, or'ed together as follows: | 1294 | * \return Flags, or'ed together as follows: |
1033 | * | 1295 | * |
1034 | * RADEON_SCANOUTPOS_VALID = Query successful. | 1296 | * DRM_SCANOUTPOS_VALID = Query successful. |
1035 | * RADEON_SCANOUTPOS_INVBL = Inside vblank. | 1297 | * DRM_SCANOUTPOS_INVBL = Inside vblank. |
1036 | * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of | 1298 | * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of |
1037 | * this flag means that returned position may be offset by a constant but | 1299 | * this flag means that returned position may be offset by a constant but |
1038 | * unknown small number of scanlines wrt. real scanout position. | 1300 | * unknown small number of scanlines wrt. real scanout position. |
1039 | * | 1301 | * |
1040 | */ | 1302 | */ |
1041 | int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) | 1303 | int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) |
1042 | { | 1304 | { |
1043 | u32 stat_crtc = 0, vbl = 0, position = 0; | 1305 | u32 stat_crtc = 0, vbl = 0, position = 0; |
1044 | int vbl_start, vbl_end, vtotal, ret = 0; | 1306 | int vbl_start, vbl_end, vtotal, ret = 0; |
1045 | bool in_vbl = true; | 1307 | bool in_vbl = true; |
1046 | 1308 | ||
1309 | struct radeon_device *rdev = dev->dev_private; | ||
1310 | |||
1047 | if (ASIC_IS_DCE4(rdev)) { | 1311 | if (ASIC_IS_DCE4(rdev)) { |
1048 | if (crtc == 0) { | 1312 | if (crtc == 0) { |
1049 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 1313 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1050 | EVERGREEN_CRTC0_REGISTER_OFFSET); | 1314 | EVERGREEN_CRTC0_REGISTER_OFFSET); |
1051 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 1315 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1052 | EVERGREEN_CRTC0_REGISTER_OFFSET); | 1316 | EVERGREEN_CRTC0_REGISTER_OFFSET); |
1053 | ret |= RADEON_SCANOUTPOS_VALID; | 1317 | ret |= DRM_SCANOUTPOS_VALID; |
1054 | } | 1318 | } |
1055 | if (crtc == 1) { | 1319 | if (crtc == 1) { |
1056 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 1320 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1057 | EVERGREEN_CRTC1_REGISTER_OFFSET); | 1321 | EVERGREEN_CRTC1_REGISTER_OFFSET); |
1058 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 1322 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1059 | EVERGREEN_CRTC1_REGISTER_OFFSET); | 1323 | EVERGREEN_CRTC1_REGISTER_OFFSET); |
1060 | ret |= RADEON_SCANOUTPOS_VALID; | 1324 | ret |= DRM_SCANOUTPOS_VALID; |
1061 | } | 1325 | } |
1062 | if (crtc == 2) { | 1326 | if (crtc == 2) { |
1063 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 1327 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1064 | EVERGREEN_CRTC2_REGISTER_OFFSET); | 1328 | EVERGREEN_CRTC2_REGISTER_OFFSET); |
1065 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 1329 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1066 | EVERGREEN_CRTC2_REGISTER_OFFSET); | 1330 | EVERGREEN_CRTC2_REGISTER_OFFSET); |
1067 | ret |= RADEON_SCANOUTPOS_VALID; | 1331 | ret |= DRM_SCANOUTPOS_VALID; |
1068 | } | 1332 | } |
1069 | if (crtc == 3) { | 1333 | if (crtc == 3) { |
1070 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 1334 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1071 | EVERGREEN_CRTC3_REGISTER_OFFSET); | 1335 | EVERGREEN_CRTC3_REGISTER_OFFSET); |
1072 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 1336 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1073 | EVERGREEN_CRTC3_REGISTER_OFFSET); | 1337 | EVERGREEN_CRTC3_REGISTER_OFFSET); |
1074 | ret |= RADEON_SCANOUTPOS_VALID; | 1338 | ret |= DRM_SCANOUTPOS_VALID; |
1075 | } | 1339 | } |
1076 | if (crtc == 4) { | 1340 | if (crtc == 4) { |
1077 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 1341 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1078 | EVERGREEN_CRTC4_REGISTER_OFFSET); | 1342 | EVERGREEN_CRTC4_REGISTER_OFFSET); |
1079 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 1343 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1080 | EVERGREEN_CRTC4_REGISTER_OFFSET); | 1344 | EVERGREEN_CRTC4_REGISTER_OFFSET); |
1081 | ret |= RADEON_SCANOUTPOS_VALID; | 1345 | ret |= DRM_SCANOUTPOS_VALID; |
1082 | } | 1346 | } |
1083 | if (crtc == 5) { | 1347 | if (crtc == 5) { |
1084 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 1348 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
1085 | EVERGREEN_CRTC5_REGISTER_OFFSET); | 1349 | EVERGREEN_CRTC5_REGISTER_OFFSET); |
1086 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 1350 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1087 | EVERGREEN_CRTC5_REGISTER_OFFSET); | 1351 | EVERGREEN_CRTC5_REGISTER_OFFSET); |
1088 | ret |= RADEON_SCANOUTPOS_VALID; | 1352 | ret |= DRM_SCANOUTPOS_VALID; |
1089 | } | 1353 | } |
1090 | } else if (ASIC_IS_AVIVO(rdev)) { | 1354 | } else if (ASIC_IS_AVIVO(rdev)) { |
1091 | if (crtc == 0) { | 1355 | if (crtc == 0) { |
1092 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); | 1356 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); |
1093 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); | 1357 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); |
1094 | ret |= RADEON_SCANOUTPOS_VALID; | 1358 | ret |= DRM_SCANOUTPOS_VALID; |
1095 | } | 1359 | } |
1096 | if (crtc == 1) { | 1360 | if (crtc == 1) { |
1097 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); | 1361 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); |
1098 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); | 1362 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); |
1099 | ret |= RADEON_SCANOUTPOS_VALID; | 1363 | ret |= DRM_SCANOUTPOS_VALID; |
1100 | } | 1364 | } |
1101 | } else { | 1365 | } else { |
1102 | /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ | 1366 | /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ |
@@ -1112,7 +1376,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, | |||
1112 | if (!(stat_crtc & 1)) | 1376 | if (!(stat_crtc & 1)) |
1113 | in_vbl = false; | 1377 | in_vbl = false; |
1114 | 1378 | ||
1115 | ret |= RADEON_SCANOUTPOS_VALID; | 1379 | ret |= DRM_SCANOUTPOS_VALID; |
1116 | } | 1380 | } |
1117 | if (crtc == 1) { | 1381 | if (crtc == 1) { |
1118 | vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & | 1382 | vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & |
@@ -1122,7 +1386,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, | |||
1122 | if (!(stat_crtc & 1)) | 1386 | if (!(stat_crtc & 1)) |
1123 | in_vbl = false; | 1387 | in_vbl = false; |
1124 | 1388 | ||
1125 | ret |= RADEON_SCANOUTPOS_VALID; | 1389 | ret |= DRM_SCANOUTPOS_VALID; |
1126 | } | 1390 | } |
1127 | } | 1391 | } |
1128 | 1392 | ||
@@ -1133,13 +1397,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, | |||
1133 | /* Valid vblank area boundaries from gpu retrieved? */ | 1397 | /* Valid vblank area boundaries from gpu retrieved? */ |
1134 | if (vbl > 0) { | 1398 | if (vbl > 0) { |
1135 | /* Yes: Decode. */ | 1399 | /* Yes: Decode. */ |
1136 | ret |= RADEON_SCANOUTPOS_ACCURATE; | 1400 | ret |= DRM_SCANOUTPOS_ACCURATE; |
1137 | vbl_start = vbl & 0x1fff; | 1401 | vbl_start = vbl & 0x1fff; |
1138 | vbl_end = (vbl >> 16) & 0x1fff; | 1402 | vbl_end = (vbl >> 16) & 0x1fff; |
1139 | } | 1403 | } |
1140 | else { | 1404 | else { |
1141 | /* No: Fake something reasonable which gives at least OK results. */ | 1405 | /* No: Fake something reasonable which gives at least OK results. */ |
1142 | vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; | 1406 | vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; |
1143 | vbl_end = 0; | 1407 | vbl_end = 0; |
1144 | } | 1408 | } |
1145 | 1409 | ||
@@ -1155,7 +1419,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, | |||
1155 | 1419 | ||
1156 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ | 1420 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ |
1157 | if (in_vbl && (*vpos >= vbl_start)) { | 1421 | if (in_vbl && (*vpos >= vbl_start)) { |
1158 | vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; | 1422 | vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; |
1159 | *vpos = *vpos - vtotal; | 1423 | *vpos = *vpos - vtotal; |
1160 | } | 1424 | } |
1161 | 1425 | ||
@@ -1164,7 +1428,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, | |||
1164 | 1428 | ||
1165 | /* In vblank? */ | 1429 | /* In vblank? */ |
1166 | if (in_vbl) | 1430 | if (in_vbl) |
1167 | ret |= RADEON_SCANOUTPOS_INVBL; | 1431 | ret |= DRM_SCANOUTPOS_INVBL; |
1168 | 1432 | ||
1169 | return ret; | 1433 | return ret; |
1170 | } | 1434 | } |
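Two details of the position math above are easy to miss. The *_V_BLANK_START_END registers pack the start of vblank in the low halfword and its end in the high one, each masked to 13 bits; and vpos is deliberately remapped to a negative value inside the leading part of vblank, so callers see "lines until active scanout starts":

	vbl_start = vbl & 0x1fff;		/* first line of vblank */
	vbl_end   = (vbl >> 16) & 0x1fff;	/* first line after vblank */

	/* in the "upper part" of vblank, count down towards line 0 */
	if (in_vbl && vpos >= vbl_start)
		vpos -= vtotal;			/* e.g. 1121 with vtotal 1125 -> -4 */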
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e4ea925900..a92d2a5cea90 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -48,9 +48,10 @@ | |||
48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen | 48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen |
49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) | 49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) |
50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs | 50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs |
51 | * 2.8.0 - pageflip support | ||
51 | */ | 52 | */ |
52 | #define KMS_DRIVER_MAJOR 2 | 53 | #define KMS_DRIVER_MAJOR 2 |
53 | #define KMS_DRIVER_MINOR 7 | 54 | #define KMS_DRIVER_MINOR 8 |
54 | #define KMS_DRIVER_PATCHLEVEL 0 | 55 | #define KMS_DRIVER_PATCHLEVEL 0 |
55 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 56 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
56 | int radeon_driver_unload_kms(struct drm_device *dev); | 57 | int radeon_driver_unload_kms(struct drm_device *dev); |
@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev); | |||
66 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); | 67 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); |
67 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); | 68 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); |
68 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); | 69 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); |
70 | int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | ||
71 | int *max_error, | ||
72 | struct timeval *vblank_time, | ||
73 | unsigned flags); | ||
69 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev); | 74 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev); |
70 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); | 75 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); |
71 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); | 76 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); |
@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, | |||
74 | struct drm_file *file_priv); | 79 | struct drm_file *file_priv); |
75 | int radeon_gem_object_init(struct drm_gem_object *obj); | 80 | int radeon_gem_object_init(struct drm_gem_object *obj); |
76 | void radeon_gem_object_free(struct drm_gem_object *obj); | 81 | void radeon_gem_object_free(struct drm_gem_object *obj); |
82 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | ||
83 | int *vpos, int *hpos); | ||
77 | extern struct drm_ioctl_desc radeon_ioctls_kms[]; | 84 | extern struct drm_ioctl_desc radeon_ioctls_kms[]; |
78 | extern int radeon_max_kms_ioctl; | 85 | extern int radeon_max_kms_ioctl; |
79 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma); | 86 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma); |
@@ -277,6 +284,8 @@ static struct drm_driver kms_driver = { | |||
277 | .get_vblank_counter = radeon_get_vblank_counter_kms, | 284 | .get_vblank_counter = radeon_get_vblank_counter_kms, |
278 | .enable_vblank = radeon_enable_vblank_kms, | 285 | .enable_vblank = radeon_enable_vblank_kms, |
279 | .disable_vblank = radeon_disable_vblank_kms, | 286 | .disable_vblank = radeon_disable_vblank_kms, |
287 | .get_vblank_timestamp = radeon_get_vblank_timestamp_kms, | ||
288 | .get_scanout_position = radeon_get_crtc_scanoutpos, | ||
280 | #if defined(CONFIG_DEBUG_FS) | 289 | #if defined(CONFIG_DEBUG_FS) |
281 | .debugfs_init = radeon_debugfs_init, | 290 | .debugfs_init = radeon_debugfs_init, |
282 | .debugfs_cleanup = radeon_debugfs_cleanup, | 291 | .debugfs_cleanup = radeon_debugfs_cleanup, |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 041943df966b..e4e64a80b58d 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -713,7 +713,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
713 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | 713 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B |
714 | * | 714 | * |
715 | * DCE 4.0 | 715 | * DCE 4.0 |
716 | * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B). | 716 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). |
717 | * Supports up to 6 digital outputs | 717 | * Supports up to 6 digital outputs |
718 | * - 6 DIG encoder blocks. | 718 | * - 6 DIG encoder blocks. |
719 | * - DIG to PHY mapping is hardcoded | 719 | * - DIG to PHY mapping is hardcoded |
@@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
724 | * DIG5 drives UNIPHY2 link A, A+B | 724 | * DIG5 drives UNIPHY2 link A, A+B |
725 | * DIG6 drives UNIPHY2 link B | 725 | * DIG6 drives UNIPHY2 link B |
726 | * | 726 | * |
727 | * DCE 4.1 | ||
728 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
729 | * Supports up to 6 digital outputs | ||
730 | * - 2 DIG encoder blocks. | ||
731 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
732 | * | ||
727 | * Routing | 733 | * Routing |
728 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) | 734 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) |
729 | * Examples: | 735 | * Examples: |
@@ -904,9 +910,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
904 | else | 910 | else |
905 | args.v3.ucLaneNum = 4; | 911 | args.v3.ucLaneNum = 4; |
906 | 912 | ||
907 | if (dig->linkb) { | 913 | if (ASIC_IS_DCE41(rdev)) { |
908 | args.v3.acConfig.ucLinkSel = 1; | 914 | args.v3.acConfig.ucEncoderSel = dig->dig_encoder; |
909 | args.v3.acConfig.ucEncoderSel = 1; | 915 | if (dig->linkb) |
916 | args.v3.acConfig.ucLinkSel = 1; | ||
917 | } else { | ||
918 | if (dig->linkb) { | ||
919 | args.v3.acConfig.ucLinkSel = 1; | ||
920 | args.v3.acConfig.ucEncoderSel = 1; | ||
921 | } | ||
910 | } | 922 | } |
911 | 923 | ||
912 | /* Select the PLL for the PHY | 924 | /* Select the PLL for the PHY |
@@ -1044,6 +1056,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action) | |||
1044 | 1056 | ||
1045 | union external_encoder_control { | 1057 | union external_encoder_control { |
1046 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; | 1058 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; |
1059 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; | ||
1047 | }; | 1060 | }; |
1048 | 1061 | ||
1049 | static void | 1062 | static void |
@@ -1054,6 +1067,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, | |||
1054 | struct drm_device *dev = encoder->dev; | 1067 | struct drm_device *dev = encoder->dev; |
1055 | struct radeon_device *rdev = dev->dev_private; | 1068 | struct radeon_device *rdev = dev->dev_private; |
1056 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1069 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1070 | struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); | ||
1057 | union external_encoder_control args; | 1071 | union external_encoder_control args; |
1058 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1072 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1059 | int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); | 1073 | int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); |
@@ -1061,6 +1075,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, | |||
1061 | int dp_clock = 0; | 1075 | int dp_clock = 0; |
1062 | int dp_lane_count = 0; | 1076 | int dp_lane_count = 0; |
1063 | int connector_object_id = 0; | 1077 | int connector_object_id = 0; |
1078 | u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
1064 | 1079 | ||
1065 | if (connector) { | 1080 | if (connector) { |
1066 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 1081 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
@@ -1099,6 +1114,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, | |||
1099 | else | 1114 | else |
1100 | args.v1.sDigEncoder.ucLaneNum = 4; | 1115 | args.v1.sDigEncoder.ucLaneNum = 4; |
1101 | break; | 1116 | break; |
1117 | case 3: | ||
1118 | args.v3.sExtEncoder.ucAction = action; | ||
1119 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | ||
1120 | args.v3.sExtEncoder.usConnectorId = connector_object_id; | ||
1121 | else | ||
1122 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
1123 | args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
1124 | |||
1125 | if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | ||
1126 | if (dp_clock == 270000) | ||
1127 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
1128 | else if (dp_clock == 540000) | ||
1129 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; | ||
1130 | args.v3.sExtEncoder.ucLaneNum = dp_lane_count; | ||
1131 | } else if (radeon_encoder->pixel_clock > 165000) | ||
1132 | args.v3.sExtEncoder.ucLaneNum = 8; | ||
1133 | else | ||
1134 | args.v3.sExtEncoder.ucLaneNum = 4; | ||
1135 | switch (ext_enum) { | ||
1136 | case GRAPH_OBJECT_ENUM_ID1: | ||
1137 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; | ||
1138 | break; | ||
1139 | case GRAPH_OBJECT_ENUM_ID2: | ||
1140 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; | ||
1141 | break; | ||
1142 | case GRAPH_OBJECT_ENUM_ID3: | ||
1143 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; | ||
1144 | break; | ||
1145 | } | ||
1146 | args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
1147 | break; | ||
1102 | default: | 1148 | default: |
1103 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | 1149 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); |
1104 | return; | 1150 | return; |
@@ -1289,12 +1335,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1289 | switch (mode) { | 1335 | switch (mode) { |
1290 | case DRM_MODE_DPMS_ON: | 1336 | case DRM_MODE_DPMS_ON: |
1291 | default: | 1337 | default: |
1292 | action = ATOM_ENABLE; | 1338 | if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) |
1339 | action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; | ||
1340 | else | ||
1341 | action = ATOM_ENABLE; | ||
1293 | break; | 1342 | break; |
1294 | case DRM_MODE_DPMS_STANDBY: | 1343 | case DRM_MODE_DPMS_STANDBY: |
1295 | case DRM_MODE_DPMS_SUSPEND: | 1344 | case DRM_MODE_DPMS_SUSPEND: |
1296 | case DRM_MODE_DPMS_OFF: | 1345 | case DRM_MODE_DPMS_OFF: |
1297 | action = ATOM_DISABLE; | 1346 | if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) |
1347 | action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; | ||
1348 | else | ||
1349 | action = ATOM_DISABLE; | ||
1298 | break; | 1350 | break; |
1299 | } | 1351 | } |
1300 | atombios_external_encoder_setup(encoder, ext_encoder, action); | 1352 | atombios_external_encoder_setup(encoder, ext_encoder, action); |
@@ -1483,6 +1535,11 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1483 | struct radeon_encoder_atom_dig *dig; | 1535 | struct radeon_encoder_atom_dig *dig; |
1484 | uint32_t dig_enc_in_use = 0; | 1536 | uint32_t dig_enc_in_use = 0; |
1485 | 1537 | ||
1538 | /* on DCE41 an encoder can drive any phy, so just use the crtc id */ | ||
1539 | if (ASIC_IS_DCE41(rdev)) { | ||
1540 | return radeon_crtc->crtc_id; | ||
1541 | } | ||
1542 | |||
1486 | if (ASIC_IS_DCE4(rdev)) { | 1543 | if (ASIC_IS_DCE4(rdev)) { |
1487 | dig = radeon_encoder->enc_priv; | 1544 | dig = radeon_encoder->enc_priv; |
1488 | switch (radeon_encoder->encoder_id) { | 1545 | switch (radeon_encoder->encoder_id) { |
@@ -1610,7 +1667,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1610 | } | 1667 | } |
1611 | 1668 | ||
1612 | if (ext_encoder) { | 1669 | if (ext_encoder) { |
1613 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | 1670 | if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) { |
1671 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1672 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); | ||
1673 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1674 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); | ||
1675 | } else | ||
1676 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
1614 | } | 1677 | } |
1615 | 1678 | ||
1616 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1679 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
@@ -2029,6 +2092,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
2029 | case ENCODER_OBJECT_ID_TITFP513: | 2092 | case ENCODER_OBJECT_ID_TITFP513: |
2030 | case ENCODER_OBJECT_ID_VT1623: | 2093 | case ENCODER_OBJECT_ID_VT1623: |
2031 | case ENCODER_OBJECT_ID_HDMI_SI1930: | 2094 | case ENCODER_OBJECT_ID_HDMI_SI1930: |
2095 | case ENCODER_OBJECT_ID_TRAVIS: | ||
2096 | case ENCODER_OBJECT_ID_NUTMEG: | ||
2032 | /* these are handled by the primary encoders */ | 2097 | /* these are handled by the primary encoders */ |
2033 | radeon_encoder->is_ext_encoder = true; | 2098 | radeon_encoder->is_ext_encoder = true; |
2034 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 2099 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index e329066dcabd..4c222d5437d1 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -80,6 +80,7 @@ enum radeon_family { | |||
80 | CHIP_JUNIPER, | 80 | CHIP_JUNIPER, |
81 | CHIP_CYPRESS, | 81 | CHIP_CYPRESS, |
82 | CHIP_HEMLOCK, | 82 | CHIP_HEMLOCK, |
83 | CHIP_PALM, | ||
83 | CHIP_LAST, | 84 | CHIP_LAST, |
84 | }; | 85 | }; |
85 | 86 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index daacb281dfaf..171b0b2e3a64 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include "drm.h" | 38 | #include "drm.h" |
39 | #include "radeon_reg.h" | 39 | #include "radeon_reg.h" |
40 | #include "radeon.h" | 40 | #include "radeon.h" |
41 | #include "radeon_trace.h" | ||
41 | 42 | ||
42 | int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) | 43 | int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) |
43 | { | 44 | { |
@@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) | |||
57 | } else | 58 | } else |
58 | radeon_fence_ring_emit(rdev, fence); | 59 | radeon_fence_ring_emit(rdev, fence); |
59 | 60 | ||
61 | trace_radeon_fence_emit(rdev->ddev, fence->seq); | ||
60 | fence->emited = true; | 62 | fence->emited = true; |
61 | list_del(&fence->list); | 63 | list_del(&fence->list); |
62 | list_add_tail(&fence->list, &rdev->fence_drv.emited); | 64 | list_add_tail(&fence->list, &rdev->fence_drv.emited); |
@@ -213,6 +215,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) | |||
213 | retry: | 215 | retry: |
214 | /* save current sequence used to check for GPU lockup */ | 216 | /* save current sequence used to check for GPU lockup */ |
215 | seq = rdev->fence_drv.last_seq; | 217 | seq = rdev->fence_drv.last_seq; |
218 | trace_radeon_fence_wait_begin(rdev->ddev, seq); | ||
216 | if (intr) { | 219 | if (intr) { |
217 | radeon_irq_kms_sw_irq_get(rdev); | 220 | radeon_irq_kms_sw_irq_get(rdev); |
218 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 221 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
@@ -227,6 +230,7 @@ retry: | |||
227 | radeon_fence_signaled(fence), timeout); | 230 | radeon_fence_signaled(fence), timeout); |
228 | radeon_irq_kms_sw_irq_put(rdev); | 231 | radeon_irq_kms_sw_irq_put(rdev); |
229 | } | 232 | } |
233 | trace_radeon_fence_wait_end(rdev->ddev, seq); | ||
230 | if (unlikely(!radeon_fence_signaled(fence))) { | 234 | if (unlikely(!radeon_fence_signaled(fence))) { |
231 | /* we were interrupted for some reason and fence isn't | 235 | /* we were interrupted for some reason and fence isn't |
232 | * signaled yet, resume wait | 236 | * signaled yet, resume wait |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a108c7ed14f5..c6861bb751ad 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -71,8 +71,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
71 | rdev->irq.gui_idle = false; | 71 | rdev->irq.gui_idle = false; |
72 | for (i = 0; i < rdev->num_crtc; i++) | 72 | for (i = 0; i < rdev->num_crtc; i++) |
73 | rdev->irq.crtc_vblank_int[i] = false; | 73 | rdev->irq.crtc_vblank_int[i] = false; |
74 | for (i = 0; i < 6; i++) | 74 | for (i = 0; i < 6; i++) { |
75 | rdev->irq.hpd[i] = false; | 75 | rdev->irq.hpd[i] = false; |
76 | rdev->irq.pflip[i] = false; | ||
77 | } | ||
76 | radeon_irq_set(rdev); | 78 | radeon_irq_set(rdev); |
77 | /* Clear bits */ | 79 | /* Clear bits */ |
78 | radeon_irq_process(rdev); | 80 | radeon_irq_process(rdev); |
@@ -101,8 +103,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
101 | rdev->irq.gui_idle = false; | 103 | rdev->irq.gui_idle = false; |
102 | for (i = 0; i < rdev->num_crtc; i++) | 104 | for (i = 0; i < rdev->num_crtc; i++) |
103 | rdev->irq.crtc_vblank_int[i] = false; | 105 | rdev->irq.crtc_vblank_int[i] = false; |
104 | for (i = 0; i < 6; i++) | 106 | for (i = 0; i < 6; i++) { |
105 | rdev->irq.hpd[i] = false; | 107 | rdev->irq.hpd[i] = false; |
108 | rdev->irq.pflip[i] = false; | ||
109 | } | ||
106 | radeon_irq_set(rdev); | 110 | radeon_irq_set(rdev); |
107 | } | 111 | } |
108 | 112 | ||
@@ -121,7 +125,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
121 | * chips. Disable MSI on them for now. | 125 | * chips. Disable MSI on them for now. |
122 | */ | 126 | */ |
123 | if ((rdev->family >= CHIP_RV380) && | 127 | if ((rdev->family >= CHIP_RV380) && |
124 | (!(rdev->flags & RADEON_IS_IGP)) && | 128 | ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && |
125 | (!(rdev->flags & RADEON_IS_AGP))) { | 129 | (!(rdev->flags & RADEON_IS_AGP))) { |
126 | int ret = pci_enable_msi(rdev->pdev); | 130 | int ret = pci_enable_msi(rdev->pdev); |
127 | if (!ret) { | 131 | if (!ret) { |
@@ -175,3 +179,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) | |||
175 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); | 179 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); |
176 | } | 180 | } |
177 | 181 | ||
182 | void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) | ||
183 | { | ||
184 | unsigned long irqflags; | ||
185 | |||
186 | if (crtc < 0 || crtc >= rdev->num_crtc) | ||
187 | return; | ||
188 | |||
189 | spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); | ||
190 | if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { | ||
191 | rdev->irq.pflip[crtc] = true; | ||
192 | radeon_irq_set(rdev); | ||
193 | } | ||
194 | spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); | ||
195 | } | ||
196 | |||
197 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) | ||
198 | { | ||
199 | unsigned long irqflags; | ||
200 | |||
201 | if (crtc < 0 || crtc >= rdev->num_crtc) | ||
202 | return; | ||
203 | |||
204 | spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); | ||
205 | BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); | ||
206 | if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) { | ||
207 | rdev->irq.pflip[crtc] = false; | ||
208 | radeon_irq_set(rdev); | ||
209 | } | ||
210 | spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); | ||
211 | } | ||
212 | |||
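Editor's note: radeon_irq_kms_pflip_irq_get()/put() added above form a refcounted pair: the vblank interrupt for a crtc stays enabled for as long as at least one page flip is outstanding on it. A sketch of the balanced usage this implies — program_flip() is a placeholder for the hardware-specific flip functions added elsewhere in this series:

	/* Sketch only: each queued flip holds one reference on the
	 * pflip interrupt for its crtc. */
	static void queue_one_flip(struct radeon_device *rdev, int crtc,
				   u64 crtc_base)
	{
		/* First outstanding flip enables the interrupt. */
		radeon_irq_kms_pflip_irq_get(rdev, crtc);
		program_flip(rdev, crtc, crtc_base); /* e.g. rs600_page_flip() */
	}

	static void complete_one_flip(struct radeon_device *rdev, int crtc)
	{
		/* Last completed flip disables the interrupt again. */
		radeon_irq_kms_pflip_irq_put(rdev, crtc);
	}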
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8fbbe1c6ebbd..4bf423ca4c12 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -277,6 +277,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) | |||
277 | radeon_irq_set(rdev); | 277 | radeon_irq_set(rdev); |
278 | } | 278 | } |
279 | 279 | ||
280 | int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | ||
281 | int *max_error, | ||
282 | struct timeval *vblank_time, | ||
283 | unsigned flags) | ||
284 | { | ||
285 | struct drm_crtc *drmcrtc; | ||
286 | struct radeon_device *rdev = dev->dev_private; | ||
287 | |||
288 | if (crtc < 0 || crtc >= dev->num_crtcs) { | ||
289 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
290 | return -EINVAL; | ||
291 | } | ||
292 | |||
293 | /* Get associated drm_crtc: */ | ||
294 | drmcrtc = &rdev->mode_info.crtcs[crtc]->base; | ||
295 | |||
296 | /* Helper routine in DRM core does all the work: */ | ||
297 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | ||
298 | vblank_time, flags, | ||
299 | drmcrtc); | ||
300 | } | ||
280 | 301 | ||
281 | /* | 302 | /* |
282 | * IOCTL. | 303 | * IOCTL. |
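Editor's note: radeon_get_vblank_timestamp_kms() only validates the crtc index and delegates to the DRM core; the helper converts the current scanout position into a time offset using the constants that drm_calc_timestamping_constants() derives from crtc->hwmode. Roughly — ignoring interlace, clamping and the retry loop of the real helper, and assuming vpos counts scanlines relative to the start of vblank — the estimate reduces to:

	/* Sketch only, under the assumptions stated above; not the
	 * actual core implementation. */
	static void estimate_vblank_time(const struct drm_display_mode *hwmode,
					 int vpos, const struct timeval *now,
					 struct timeval *vblank_time)
	{
		/* hwmode->clock is the pixel clock in kHz, so one scanline
		 * lasts crtc_htotal * 1000000 / clock nanoseconds. */
		s64 line_ns  = div64_s64((s64)hwmode->crtc_htotal * 1000000,
					 hwmode->clock);
		s64 delta_ns = (s64)vpos * line_ns;

		/* vblank started delta_ns before "now". */
		*vblank_time = ns_to_timeval(timeval_to_ns(now) - delta_ns);
	}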
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e301c6f9e059..f406f02bf14e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -277,6 +277,9 @@ struct radeon_crtc { | |||
277 | fixed20_12 hsc; | 277 | fixed20_12 hsc; |
278 | struct drm_display_mode native_mode; | 278 | struct drm_display_mode native_mode; |
279 | int pll_id; | 279 | int pll_id; |
280 | /* page flipping */ | ||
281 | struct radeon_unpin_work *unpin_work; | ||
282 | int deferred_flip_completion; | ||
280 | }; | 283 | }; |
281 | 284 | ||
282 | struct radeon_encoder_primary_dac { | 285 | struct radeon_encoder_primary_dac { |
@@ -442,10 +445,6 @@ struct radeon_framebuffer { | |||
442 | struct drm_gem_object *obj; | 445 | struct drm_gem_object *obj; |
443 | }; | 446 | }; |
444 | 447 | ||
445 | /* radeon_get_crtc_scanoutpos() return flags */ | ||
446 | #define RADEON_SCANOUTPOS_VALID (1 << 0) | ||
447 | #define RADEON_SCANOUTPOS_INVBL (1 << 1) | ||
448 | #define RADEON_SCANOUTPOS_ACCURATE (1 << 2) | ||
449 | 448 | ||
450 | extern enum radeon_tv_std | 449 | extern enum radeon_tv_std |
451 | radeon_combios_get_tv_info(struct radeon_device *rdev); | 450 | radeon_combios_get_tv_info(struct radeon_device *rdev); |
@@ -562,7 +561,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
562 | extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, | 561 | extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
563 | int x, int y); | 562 | int x, int y); |
564 | 563 | ||
565 | extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); | 564 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, |
565 | int *vpos, int *hpos); | ||
566 | 566 | ||
567 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); | 567 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); |
568 | extern struct edid * | 568 | extern struct edid * |
@@ -662,4 +662,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev); | |||
662 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); | 662 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); |
663 | 663 | ||
664 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); | 664 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); |
665 | |||
666 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); | ||
667 | |||
665 | #endif | 668 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index a598d0049aa5..7d6b8e88f746 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
35 | #include "radeon_drm.h" | 35 | #include "radeon_drm.h" |
36 | #include "radeon.h" | 36 | #include "radeon.h" |
37 | #include "radeon_trace.h" | ||
37 | 38 | ||
38 | 39 | ||
39 | int radeon_ttm_init(struct radeon_device *rdev); | 40 | int radeon_ttm_init(struct radeon_device *rdev); |
@@ -146,6 +147,7 @@ retry: | |||
146 | list_add_tail(&bo->list, &rdev->gem.objects); | 147 | list_add_tail(&bo->list, &rdev->gem.objects); |
147 | mutex_unlock(&bo->rdev->gem.mutex); | 148 | mutex_unlock(&bo->rdev->gem.mutex); |
148 | } | 149 | } |
150 | trace_radeon_bo_create(bo); | ||
149 | return 0; | 151 | return 0; |
150 | } | 152 | } |
151 | 153 | ||
@@ -302,34 +304,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | |||
302 | struct list_head *head) | 304 | struct list_head *head) |
303 | { | 305 | { |
304 | if (lobj->wdomain) { | 306 | if (lobj->wdomain) { |
305 | list_add(&lobj->list, head); | 307 | list_add(&lobj->tv.head, head); |
306 | } else { | 308 | } else { |
307 | list_add_tail(&lobj->list, head); | 309 | list_add_tail(&lobj->tv.head, head); |
308 | } | ||
309 | } | ||
310 | |||
311 | int radeon_bo_list_reserve(struct list_head *head) | ||
312 | { | ||
313 | struct radeon_bo_list *lobj; | ||
314 | int r; | ||
315 | |||
316 | list_for_each_entry(lobj, head, list){ | ||
317 | r = radeon_bo_reserve(lobj->bo, false); | ||
318 | if (unlikely(r != 0)) | ||
319 | return r; | ||
320 | lobj->reserved = true; | ||
321 | } | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | void radeon_bo_list_unreserve(struct list_head *head) | ||
326 | { | ||
327 | struct radeon_bo_list *lobj; | ||
328 | |||
329 | list_for_each_entry(lobj, head, list) { | ||
330 | /* only unreserve object we successfully reserved */ | ||
331 | if (lobj->reserved && radeon_bo_is_reserved(lobj->bo)) | ||
332 | radeon_bo_unreserve(lobj->bo); | ||
333 | } | 310 | } |
334 | } | 311 | } |
335 | 312 | ||
@@ -340,14 +317,11 @@ int radeon_bo_list_validate(struct list_head *head) | |||
340 | u32 domain; | 317 | u32 domain; |
341 | int r; | 318 | int r; |
342 | 319 | ||
343 | list_for_each_entry(lobj, head, list) { | 320 | r = ttm_eu_reserve_buffers(head); |
344 | lobj->reserved = false; | ||
345 | } | ||
346 | r = radeon_bo_list_reserve(head); | ||
347 | if (unlikely(r != 0)) { | 321 | if (unlikely(r != 0)) { |
348 | return r; | 322 | return r; |
349 | } | 323 | } |
350 | list_for_each_entry(lobj, head, list) { | 324 | list_for_each_entry(lobj, head, tv.head) { |
351 | bo = lobj->bo; | 325 | bo = lobj->bo; |
352 | if (!bo->pin_count) { | 326 | if (!bo->pin_count) { |
353 | domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; | 327 | domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; |
@@ -370,25 +344,6 @@ int radeon_bo_list_validate(struct list_head *head) | |||
370 | return 0; | 344 | return 0; |
371 | } | 345 | } |
372 | 346 | ||
373 | void radeon_bo_list_fence(struct list_head *head, void *fence) | ||
374 | { | ||
375 | struct radeon_bo_list *lobj; | ||
376 | struct radeon_bo *bo; | ||
377 | struct radeon_fence *old_fence = NULL; | ||
378 | |||
379 | list_for_each_entry(lobj, head, list) { | ||
380 | bo = lobj->bo; | ||
381 | spin_lock(&bo->tbo.lock); | ||
382 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; | ||
383 | bo->tbo.sync_obj = radeon_fence_ref(fence); | ||
384 | bo->tbo.sync_obj_arg = NULL; | ||
385 | spin_unlock(&bo->tbo.lock); | ||
386 | if (old_fence) { | ||
387 | radeon_fence_unref(&old_fence); | ||
388 | } | ||
389 | } | ||
390 | } | ||
391 | |||
392 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 347 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
393 | struct vm_area_struct *vma) | 348 | struct vm_area_struct *vma) |
394 | { | 349 | { |
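Editor's note: with radeon_bo_list_reserve()/unreserve()/fence() removed, radeon_bo_list_validate() delegates the reserve/backoff dance to TTM's execbuffer utilities, which is why list entries are now linked through lobj->tv.head. Assuming the ttm_eu_* helpers from ttm_execbuf_util.h, the resulting flow looks roughly like:

	/* Sketch only: reserve -> validate -> emit -> fence-and-unreserve. */
	static int validate_and_fence(struct list_head *head,
				      struct radeon_fence *fence)
	{
		int r;

		r = radeon_bo_list_validate(head); /* reserves via ttm_eu */
		if (r)
			return r;

		/* ... emit the command stream referencing the buffers ... */

		/* Attaches the fence to every buffer and unreserves them. */
		ttm_eu_fence_buffer_objects(head, fence);
		return 0;
	}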
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index d143702b244a..22d4c237dea5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | |||
126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | 126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); |
127 | if (unlikely(r != 0)) | 127 | if (unlikely(r != 0)) |
128 | return r; | 128 | return r; |
129 | spin_lock(&bo->tbo.lock); | 129 | spin_lock(&bo->tbo.bdev->fence_lock); |
130 | if (mem_type) | 130 | if (mem_type) |
131 | *mem_type = bo->tbo.mem.mem_type; | 131 | *mem_type = bo->tbo.mem.mem_type; |
132 | if (bo->tbo.sync_obj) | 132 | if (bo->tbo.sync_obj) |
133 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); | 133 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); |
134 | spin_unlock(&bo->tbo.lock); | 134 | spin_unlock(&bo->tbo.bdev->fence_lock); |
135 | ttm_bo_unreserve(&bo->tbo); | 135 | ttm_bo_unreserve(&bo->tbo); |
136 | return r; | 136 | return r; |
137 | } | 137 | } |
@@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev); | |||
152 | extern void radeon_bo_fini(struct radeon_device *rdev); | 152 | extern void radeon_bo_fini(struct radeon_device *rdev); |
153 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | 153 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, |
154 | struct list_head *head); | 154 | struct list_head *head); |
155 | extern int radeon_bo_list_reserve(struct list_head *head); | ||
156 | extern void radeon_bo_list_unreserve(struct list_head *head); | ||
157 | extern int radeon_bo_list_validate(struct list_head *head); | 155 | extern int radeon_bo_list_validate(struct list_head *head); |
158 | extern void radeon_bo_list_fence(struct list_head *head, void *fence); | ||
159 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 156 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
160 | struct vm_area_struct *vma); | 157 | struct vm_area_struct *vma); |
161 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | 158 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 8c9b2ef32c68..4de7776bd1c5 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -449,6 +449,9 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
449 | case THERMAL_TYPE_EVERGREEN: | 449 | case THERMAL_TYPE_EVERGREEN: |
450 | temp = evergreen_get_temp(rdev); | 450 | temp = evergreen_get_temp(rdev); |
451 | break; | 451 | break; |
452 | case THERMAL_TYPE_SUMO: | ||
453 | temp = sumo_get_temp(rdev); | ||
454 | break; | ||
452 | default: | 455 | default: |
453 | temp = 0; | 456 | temp = 0; |
454 | break; | 457 | break; |
@@ -487,6 +490,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev) | |||
487 | case THERMAL_TYPE_RV6XX: | 490 | case THERMAL_TYPE_RV6XX: |
488 | case THERMAL_TYPE_RV770: | 491 | case THERMAL_TYPE_RV770: |
489 | case THERMAL_TYPE_EVERGREEN: | 492 | case THERMAL_TYPE_EVERGREEN: |
493 | case THERMAL_TYPE_SUMO: | ||
490 | rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); | 494 | rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); |
491 | if (IS_ERR(rdev->pm.int_hwmon_dev)) { | 495 | if (IS_ERR(rdev->pm.int_hwmon_dev)) { |
492 | err = PTR_ERR(rdev->pm.int_hwmon_dev); | 496 | err = PTR_ERR(rdev->pm.int_hwmon_dev); |
@@ -720,9 +724,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) | |||
720 | */ | 724 | */ |
721 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { | 725 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
722 | if (rdev->pm.active_crtcs & (1 << crtc)) { | 726 | if (rdev->pm.active_crtcs & (1 << crtc)) { |
723 | vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); | 727 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); |
724 | if ((vbl_status & RADEON_SCANOUTPOS_VALID) && | 728 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
725 | !(vbl_status & RADEON_SCANOUTPOS_INVBL)) | 729 | !(vbl_status & DRM_SCANOUTPOS_INVBL)) |
726 | in_vbl = false; | 730 | in_vbl = false; |
727 | } | 731 | } |
728 | } | 732 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 64928814de53..0a310b7f71c3 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -422,6 +422,7 @@ | |||
422 | # define RADEON_CRTC_CSYNC_EN (1 << 4) | 422 | # define RADEON_CRTC_CSYNC_EN (1 << 4) |
423 | # define RADEON_CRTC_ICON_EN (1 << 15) | 423 | # define RADEON_CRTC_ICON_EN (1 << 15) |
424 | # define RADEON_CRTC_CUR_EN (1 << 16) | 424 | # define RADEON_CRTC_CUR_EN (1 << 16) |
425 | # define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17) | ||
425 | # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) | 426 | # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) |
426 | # define RADEON_CRTC_CUR_MODE_SHIFT 20 | 427 | # define RADEON_CRTC_CUR_MODE_SHIFT 20 |
427 | # define RADEON_CRTC_CUR_MODE_MONO 0 | 428 | # define RADEON_CRTC_CUR_MODE_MONO 0 |
@@ -509,6 +510,8 @@ | |||
509 | # define RADEON_CRTC_TILE_EN (1 << 15) | 510 | # define RADEON_CRTC_TILE_EN (1 << 15) |
510 | # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) | 511 | # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) |
511 | # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) | 512 | # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) |
513 | # define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28) | ||
514 | # define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29) | ||
512 | 515 | ||
513 | #define R300_CRTC_TILE_X0_Y0 0x0350 | 516 | #define R300_CRTC_TILE_X0_Y0 0x0350 |
514 | #define R300_CRTC2_TILE_X0_Y0 0x0358 | 517 | #define R300_CRTC2_TILE_X0_Y0 0x0358 |
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h new file mode 100644 index 000000000000..eafd8160a155 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace.h | |||
@@ -0,0 +1,82 @@ | |||
1 | #if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _RADEON_TRACE_H | ||
3 | |||
4 | #include <linux/stringify.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #include <drm/drmP.h> | ||
9 | |||
10 | #undef TRACE_SYSTEM | ||
11 | #define TRACE_SYSTEM radeon | ||
12 | #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) | ||
13 | #define TRACE_INCLUDE_FILE radeon_trace | ||
14 | |||
15 | TRACE_EVENT(radeon_bo_create, | ||
16 | TP_PROTO(struct radeon_bo *bo), | ||
17 | TP_ARGS(bo), | ||
18 | TP_STRUCT__entry( | ||
19 | __field(struct radeon_bo *, bo) | ||
20 | __field(u32, pages) | ||
21 | ), | ||
22 | |||
23 | TP_fast_assign( | ||
24 | __entry->bo = bo; | ||
25 | __entry->pages = bo->tbo.num_pages; | ||
26 | ), | ||
27 | TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) | ||
28 | ); | ||
29 | |||
30 | DECLARE_EVENT_CLASS(radeon_fence_request, | ||
31 | |||
32 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
33 | |||
34 | TP_ARGS(dev, seqno), | ||
35 | |||
36 | TP_STRUCT__entry( | ||
37 | __field(u32, dev) | ||
38 | __field(u32, seqno) | ||
39 | ), | ||
40 | |||
41 | TP_fast_assign( | ||
42 | __entry->dev = dev->primary->index; | ||
43 | __entry->seqno = seqno; | ||
44 | ), | ||
45 | |||
46 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | ||
47 | ); | ||
48 | |||
49 | DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, | ||
50 | |||
51 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
52 | |||
53 | TP_ARGS(dev, seqno) | ||
54 | ); | ||
55 | |||
56 | DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, | ||
57 | |||
58 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
59 | |||
60 | TP_ARGS(dev, seqno) | ||
61 | ); | ||
62 | |||
63 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, | ||
64 | |||
65 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
66 | |||
67 | TP_ARGS(dev, seqno) | ||
68 | ); | ||
69 | |||
70 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, | ||
71 | |||
72 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
73 | |||
74 | TP_ARGS(dev, seqno) | ||
75 | ); | ||
76 | |||
77 | #endif | ||
78 | |||
79 | /* This part must be outside protection */ | ||
80 | #undef TRACE_INCLUDE_PATH | ||
81 | #define TRACE_INCLUDE_PATH . | ||
82 | #include <trace/define_trace.h> | ||
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c new file mode 100644 index 000000000000..8175993df84d --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace_points.c | |||
@@ -0,0 +1,9 @@ | |||
1 | /* Copyright Red Hat Inc 2010. | ||
2 | * Author : Dave Airlie <airlied@redhat.com> | ||
3 | */ | ||
4 | #include <drm/drmP.h> | ||
5 | #include "radeon_drm.h" | ||
6 | #include "radeon.h" | ||
7 | |||
8 | #define CREATE_TRACE_POINTS | ||
9 | #include "radeon_trace.h" | ||
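Editor's note: radeon_trace.h follows the standard kernel tracepoint pattern — one DECLARE_EVENT_CLASS plus DEFINE_EVENT instances — and radeon_trace_points.c instantiates the probes by defining CREATE_TRACE_POINTS before the single include. Adding a further fence event would need nothing beyond another DEFINE_EVENT against the existing class; a purely illustrative sketch:

	/* Hypothetical event reusing the radeon_fence_request class;
	 * no new TP_STRUCT__entry or TP_printk is needed. */
	DEFINE_EVENT(radeon_fence_request, radeon_fence_signaled,

		    TP_PROTO(struct drm_device *dev, u32 seqno),

		    TP_ARGS(dev, seqno)
	);

	/* Call site, e.g. somewhere in radeon_fence_process():
	 *	trace_radeon_fence_signaled(rdev->ddev, seq);
	 */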
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index f1c6e02c2e6b..9a85b1614c86 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -46,6 +46,56 @@ | |||
46 | void rs600_gpu_init(struct radeon_device *rdev); | 46 | void rs600_gpu_init(struct radeon_device *rdev); |
47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
48 | 48 | ||
49 | void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) | ||
50 | { | ||
51 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
52 | u32 tmp; | ||
53 | |||
54 | /* make sure the flip happens in vblank rather than hblank */ | ||
55 | tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); | ||
56 | tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; | ||
57 | WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); | ||
58 | |||
59 | /* set pageflip to happen anywhere in vblank interval */ | ||
60 | WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); | ||
61 | |||
62 | /* enable the pflip int */ | ||
63 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | ||
64 | } | ||
65 | |||
66 | void rs600_post_page_flip(struct radeon_device *rdev, int crtc) | ||
67 | { | ||
68 | /* disable the pflip int */ | ||
69 | radeon_irq_kms_pflip_irq_put(rdev, crtc); | ||
70 | } | ||
71 | |||
72 | u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
73 | { | ||
74 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
75 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | ||
76 | |||
77 | /* Lock the graphics update lock */ | ||
78 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | ||
79 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
80 | |||
81 | /* update the scanout addresses */ | ||
82 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
83 | (u32)crtc_base); | ||
84 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
85 | (u32)crtc_base); | ||
86 | |||
87 | /* Wait for update_pending to go high. */ | ||
88 | while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | ||
89 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
90 | |||
91 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
92 | tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; | ||
93 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
94 | |||
95 | /* Return current update_pending status: */ | ||
96 | return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; | ||
97 | } | ||
98 | |||
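Editor's note: rs600_page_flip() above is a lock/program/wait/unlock sequence: D1GRPH_UPDATE_LOCK keeps the new surface address from latching mid-update, and the busy loop spins until the hardware flags the double-buffered update as pending. That wait is unbounded; purely as an illustration (not part of this patch), a bounded variant might look like:

	/* Illustrative only: the same poll, but giving up after ~10ms
	 * of 1us steps instead of spinning forever. */
	static bool wait_update_pending(struct radeon_device *rdev, u32 reg)
	{
		unsigned int i;

		for (i = 0; i < 10000; i++) {
			if (RREG32(reg) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
				return true;
			udelay(1);
		}
		return false; /* caller decides how to recover */
	}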
49 | void rs600_pm_misc(struct radeon_device *rdev) | 99 | void rs600_pm_misc(struct radeon_device *rdev) |
50 | { | 100 | { |
51 | int requested_index = rdev->pm.requested_power_state_index; | 101 | int requested_index = rdev->pm.requested_power_state_index; |
@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
515 | if (rdev->irq.gui_idle) { | 565 | if (rdev->irq.gui_idle) { |
516 | tmp |= S_000040_GUI_IDLE(1); | 566 | tmp |= S_000040_GUI_IDLE(1); |
517 | } | 567 | } |
518 | if (rdev->irq.crtc_vblank_int[0]) { | 568 | if (rdev->irq.crtc_vblank_int[0] || |
569 | rdev->irq.pflip[0]) { | ||
519 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); | 570 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); |
520 | } | 571 | } |
521 | if (rdev->irq.crtc_vblank_int[1]) { | 572 | if (rdev->irq.crtc_vblank_int[1] || |
573 | rdev->irq.pflip[1]) { | ||
522 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); | 574 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); |
523 | } | 575 | } |
524 | if (rdev->irq.hpd[0]) { | 576 | if (rdev->irq.hpd[0]) { |
@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
534 | return 0; | 586 | return 0; |
535 | } | 587 | } |
536 | 588 | ||
537 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | 589 | static inline u32 rs600_irq_ack(struct radeon_device *rdev) |
538 | { | 590 | { |
539 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); | 591 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
540 | uint32_t irq_mask = S_000044_SW_INT(1); | 592 | uint32_t irq_mask = S_000044_SW_INT(1); |
@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ | |||
547 | } | 599 | } |
548 | 600 | ||
549 | if (G_000044_DISPLAY_INT_STAT(irqs)) { | 601 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
550 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); | 602 | rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
551 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { | 603 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
552 | WREG32(R_006534_D1MODE_VBLANK_STATUS, | 604 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
553 | S_006534_D1MODE_VBLANK_ACK(1)); | 605 | S_006534_D1MODE_VBLANK_ACK(1)); |
554 | } | 606 | } |
555 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { | 607 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
556 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, | 608 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
557 | S_006D34_D2MODE_VBLANK_ACK(1)); | 609 | S_006D34_D2MODE_VBLANK_ACK(1)); |
558 | } | 610 | } |
559 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { | 611 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
560 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); | 612 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); |
561 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); | 613 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); |
562 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | 614 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
563 | } | 615 | } |
564 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { | 616 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
565 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); | 617 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); |
566 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); | 618 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); |
567 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | 619 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
568 | } | 620 | } |
569 | } else { | 621 | } else { |
570 | *r500_disp_int = 0; | 622 | rdev->irq.stat_regs.r500.disp_int = 0; |
571 | } | 623 | } |
572 | 624 | ||
573 | if (irqs) { | 625 | if (irqs) { |
@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ | |||
578 | 630 | ||
579 | void rs600_irq_disable(struct radeon_device *rdev) | 631 | void rs600_irq_disable(struct radeon_device *rdev) |
580 | { | 632 | { |
581 | u32 tmp; | ||
582 | |||
583 | WREG32(R_000040_GEN_INT_CNTL, 0); | 633 | WREG32(R_000040_GEN_INT_CNTL, 0); |
584 | WREG32(R_006540_DxMODE_INT_MASK, 0); | 634 | WREG32(R_006540_DxMODE_INT_MASK, 0); |
585 | /* Wait and acknowledge irq */ | 635 | /* Wait and acknowledge irq */ |
586 | mdelay(1); | 636 | mdelay(1); |
587 | rs600_irq_ack(rdev, &tmp); | 637 | rs600_irq_ack(rdev); |
588 | } | 638 | } |
589 | 639 | ||
590 | int rs600_irq_process(struct radeon_device *rdev) | 640 | int rs600_irq_process(struct radeon_device *rdev) |
591 | { | 641 | { |
592 | uint32_t status, msi_rearm; | 642 | u32 status, msi_rearm; |
593 | uint32_t r500_disp_int; | ||
594 | bool queue_hotplug = false; | 643 | bool queue_hotplug = false; |
595 | 644 | ||
596 | /* reset gui idle ack. the status bit is broken */ | 645 | /* reset gui idle ack. the status bit is broken */ |
597 | rdev->irq.gui_idle_acked = false; | 646 | rdev->irq.gui_idle_acked = false; |
598 | 647 | ||
599 | status = rs600_irq_ack(rdev, &r500_disp_int); | 648 | status = rs600_irq_ack(rdev); |
600 | if (!status && !r500_disp_int) { | 649 | if (!status && !rdev->irq.stat_regs.r500.disp_int) { |
601 | return IRQ_NONE; | 650 | return IRQ_NONE; |
602 | } | 651 | } |
603 | while (status || r500_disp_int) { | 652 | while (status || rdev->irq.stat_regs.r500.disp_int) { |
604 | /* SW interrupt */ | 653 | /* SW interrupt */ |
605 | if (G_000044_SW_INT(status)) | 654 | if (G_000044_SW_INT(status)) { |
606 | radeon_fence_process(rdev); | 655 | radeon_fence_process(rdev); |
656 | } | ||
607 | /* GUI idle */ | 657 | /* GUI idle */ |
608 | if (G_000040_GUI_IDLE(status)) { | 658 | if (G_000040_GUI_IDLE(status)) { |
609 | rdev->irq.gui_idle_acked = true; | 659 | rdev->irq.gui_idle_acked = true; |
@@ -611,25 +661,33 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
611 | wake_up(&rdev->irq.idle_queue); | 661 | wake_up(&rdev->irq.idle_queue); |
612 | } | 662 | } |
613 | /* Vertical blank interrupts */ | 663 | /* Vertical blank interrupts */ |
614 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { | 664 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
615 | drm_handle_vblank(rdev->ddev, 0); | 665 | if (rdev->irq.crtc_vblank_int[0]) { |
616 | rdev->pm.vblank_sync = true; | 666 | drm_handle_vblank(rdev->ddev, 0); |
617 | wake_up(&rdev->irq.vblank_queue); | 667 | rdev->pm.vblank_sync = true; |
668 | wake_up(&rdev->irq.vblank_queue); | ||
669 | } | ||
670 | if (rdev->irq.pflip[0]) | ||
671 | radeon_crtc_handle_flip(rdev, 0); | ||
618 | } | 672 | } |
619 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { | 673 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
620 | drm_handle_vblank(rdev->ddev, 1); | 674 | if (rdev->irq.crtc_vblank_int[1]) { |
621 | rdev->pm.vblank_sync = true; | 675 | drm_handle_vblank(rdev->ddev, 1); |
622 | wake_up(&rdev->irq.vblank_queue); | 676 | rdev->pm.vblank_sync = true; |
677 | wake_up(&rdev->irq.vblank_queue); | ||
678 | } | ||
679 | if (rdev->irq.pflip[1]) | ||
680 | radeon_crtc_handle_flip(rdev, 1); | ||
623 | } | 681 | } |
624 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { | 682 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
625 | queue_hotplug = true; | 683 | queue_hotplug = true; |
626 | DRM_DEBUG("HPD1\n"); | 684 | DRM_DEBUG("HPD1\n"); |
627 | } | 685 | } |
628 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { | 686 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
629 | queue_hotplug = true; | 687 | queue_hotplug = true; |
630 | DRM_DEBUG("HPD2\n"); | 688 | DRM_DEBUG("HPD2\n"); |
631 | } | 689 | } |
632 | status = rs600_irq_ack(rdev, &r500_disp_int); | 690 | status = rs600_irq_ack(rdev); |
633 | } | 691 | } |
634 | /* reset gui idle ack. the status bit is broken */ | 692 | /* reset gui idle ack. the status bit is broken */ |
635 | rdev->irq.gui_idle_acked = false; | 693 | rdev->irq.gui_idle_acked = false; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4dfead8cee33..645aa1fd7611 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -42,6 +42,40 @@ | |||
42 | static void rv770_gpu_init(struct radeon_device *rdev); | 42 | static void rv770_gpu_init(struct radeon_device *rdev); |
43 | void rv770_fini(struct radeon_device *rdev); | 43 | void rv770_fini(struct radeon_device *rdev); |
44 | 44 | ||
45 | u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
46 | { | ||
47 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
48 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | ||
49 | |||
50 | /* Lock the graphics update lock */ | ||
51 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | ||
52 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
53 | |||
54 | /* update the scanout addresses */ | ||
55 | if (radeon_crtc->crtc_id) { | ||
56 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
57 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
58 | } else { | ||
59 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
60 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
61 | } | ||
62 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
63 | (u32)crtc_base); | ||
64 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
65 | (u32)crtc_base); | ||
66 | |||
67 | /* Wait for update_pending to go high. */ | ||
68 | while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | ||
69 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
70 | |||
71 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
72 | tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; | ||
73 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
74 | |||
75 | /* Return current update_pending status: */ | ||
76 | return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; | ||
77 | } | ||
78 | |||
45 | /* get temperature in millidegrees */ | 79 | /* get temperature in millidegrees */ |
46 | u32 rv770_get_temp(struct radeon_device *rdev) | 80 | u32 rv770_get_temp(struct radeon_device *rdev) |
47 | { | 81 | { |
@@ -489,6 +523,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
489 | return backend_map; | 523 | return backend_map; |
490 | } | 524 | } |
491 | 525 | ||
526 | static void rv770_program_channel_remap(struct radeon_device *rdev) | ||
527 | { | ||
528 | u32 tcp_chan_steer, mc_shared_chremap, tmp; | ||
529 | bool force_no_swizzle; | ||
530 | |||
531 | switch (rdev->family) { | ||
532 | case CHIP_RV770: | ||
533 | case CHIP_RV730: | ||
534 | force_no_swizzle = false; | ||
535 | break; | ||
536 | case CHIP_RV710: | ||
537 | case CHIP_RV740: | ||
538 | default: | ||
539 | force_no_swizzle = true; | ||
540 | break; | ||
541 | } | ||
542 | |||
543 | tmp = RREG32(MC_SHARED_CHMAP); | ||
544 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
545 | case 0: | ||
546 | case 1: | ||
547 | default: | ||
548 | /* default mapping */ | ||
549 | mc_shared_chremap = 0x00fac688; | ||
550 | break; | ||
551 | case 2: | ||
552 | case 3: | ||
553 | if (force_no_swizzle) | ||
554 | mc_shared_chremap = 0x00fac688; | ||
555 | else | ||
556 | mc_shared_chremap = 0x00bbc298; | ||
557 | break; | ||
558 | } | ||
559 | |||
560 | if (rdev->family == CHIP_RV740) | ||
561 | tcp_chan_steer = 0x00ef2a60; | ||
562 | else | ||
563 | tcp_chan_steer = 0x00fac688; | ||
564 | |||
565 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); | ||
566 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
567 | } | ||
568 | |||
492 | static void rv770_gpu_init(struct radeon_device *rdev) | 569 | static void rv770_gpu_init(struct radeon_device *rdev) |
493 | { | 570 | { |
494 | int i, j, num_qd_pipes; | 571 | int i, j, num_qd_pipes; |
@@ -688,6 +765,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
688 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 765 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
689 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 766 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
690 | 767 | ||
768 | rv770_program_channel_remap(rdev); | ||
769 | |||
691 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 770 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
692 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 771 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
693 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 772 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
@@ -956,6 +1035,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev) | |||
956 | radeon_bo_unref(&rdev->vram_scratch.robj); | 1035 | radeon_bo_unref(&rdev->vram_scratch.robj); |
957 | } | 1036 | } |
958 | 1037 | ||
1038 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | ||
1039 | { | ||
1040 | u64 size_bf, size_af; | ||
1041 | |||
1042 | if (mc->mc_vram_size > 0xE0000000) { | ||
1043 | /* leave room for at least 512M GTT */ | ||
1044 | dev_warn(rdev->dev, "limiting VRAM\n"); | ||
1045 | mc->real_vram_size = 0xE0000000; | ||
1046 | mc->mc_vram_size = 0xE0000000; | ||
1047 | } | ||
1048 | if (rdev->flags & RADEON_IS_AGP) { | ||
1049 | size_bf = mc->gtt_start; | ||
1050 | size_af = 0xFFFFFFFF - mc->gtt_end + 1; | ||
1051 | if (size_bf > size_af) { | ||
1052 | if (mc->mc_vram_size > size_bf) { | ||
1053 | dev_warn(rdev->dev, "limiting VRAM\n"); | ||
1054 | mc->real_vram_size = size_bf; | ||
1055 | mc->mc_vram_size = size_bf; | ||
1056 | } | ||
1057 | mc->vram_start = mc->gtt_start - mc->mc_vram_size; | ||
1058 | } else { | ||
1059 | if (mc->mc_vram_size > size_af) { | ||
1060 | dev_warn(rdev->dev, "limiting VRAM\n"); | ||
1061 | mc->real_vram_size = size_af; | ||
1062 | mc->mc_vram_size = size_af; | ||
1063 | } | ||
1064 | mc->vram_start = mc->gtt_end; | ||
1065 | } | ||
1066 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | ||
1067 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", | ||
1068 | mc->mc_vram_size >> 20, mc->vram_start, | ||
1069 | mc->vram_end, mc->real_vram_size >> 20); | ||
1070 | } else { | ||
1071 | radeon_vram_location(rdev, &rdev->mc, 0); | ||
1072 | rdev->mc.gtt_base_align = 0; | ||
1073 | radeon_gtt_location(rdev, mc); | ||
1074 | } | ||
1075 | } | ||
1076 | |||
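Editor's note: r700_vram_gtt_location() places VRAM in whichever gap around the AGP aperture is larger. A worked example with hypothetical numbers for the AGP branch:

	/*
	 * gtt_start = 0x80000000, gtt_end = 0xBFFFFFFF   (1GB aperture)
	 * size_bf   = gtt_start                = 0x80000000  (2GB below)
	 * size_af   = 0xFFFFFFFF - gtt_end + 1 = 0x40000000  (1GB above)
	 *
	 * size_bf > size_af, so a 1GB VRAM block lands just below the GTT:
	 *
	 * vram_start = 0x80000000 - 0x40000000     = 0x40000000
	 * vram_end   = vram_start + 0x40000000 - 1 = 0x7FFFFFFF
	 */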
959 | int rv770_mc_init(struct radeon_device *rdev) | 1077 | int rv770_mc_init(struct radeon_device *rdev) |
960 | { | 1078 | { |
961 | u32 tmp; | 1079 | u32 tmp; |
@@ -996,7 +1114,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
996 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1114 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
997 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1115 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
998 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1116 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; |
999 | r600_vram_gtt_location(rdev, &rdev->mc); | 1117 | r700_vram_gtt_location(rdev, &rdev->mc); |
1000 | radeon_update_bandwidth_info(rdev); | 1118 | radeon_update_bandwidth_info(rdev); |
1001 | 1119 | ||
1002 | return 0; | 1120 | return 0; |
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index b7a5a20e81dc..fc77e1e1a179 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -138,6 +138,7 @@ | |||
138 | #define MC_SHARED_CHMAP 0x2004 | 138 | #define MC_SHARED_CHMAP 0x2004 |
139 | #define NOOFCHAN_SHIFT 12 | 139 | #define NOOFCHAN_SHIFT 12 |
140 | #define NOOFCHAN_MASK 0x00003000 | 140 | #define NOOFCHAN_MASK 0x00003000 |
141 | #define MC_SHARED_CHREMAP 0x2008 | ||
141 | 142 | ||
142 | #define MC_ARB_RAMCFG 0x2760 | 143 | #define MC_ARB_RAMCFG 0x2760 |
143 | #define NOOFBANK_SHIFT 0 | 144 | #define NOOFBANK_SHIFT 0 |
@@ -303,6 +304,7 @@ | |||
303 | #define BILINEAR_PRECISION_8_BIT (1 << 31) | 304 | #define BILINEAR_PRECISION_8_BIT (1 << 31) |
304 | 305 | ||
305 | #define TCP_CNTL 0x9610 | 306 | #define TCP_CNTL 0x9610 |
307 | #define TCP_CHAN_STEER 0x9614 | ||
306 | 308 | ||
307 | #define VGT_CACHE_INVALIDATION 0x88C4 | 309 | #define VGT_CACHE_INVALIDATION 0x88C4 |
308 | #define CACHE_INVALIDATION(x) ((x)<<0) | 310 | #define CACHE_INVALIDATION(x) ((x)<<0) |
@@ -351,4 +353,11 @@ | |||
351 | 353 | ||
352 | #define SRBM_STATUS 0x0E50 | 354 | #define SRBM_STATUS 0x0E50 |
353 | 355 | ||
356 | #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 | ||
357 | #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 | ||
358 | #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 | ||
359 | #define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 | ||
360 | #define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c | ||
361 | #define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c | ||
362 | |||
354 | #endif | 363 | #endif |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 148a322d8f5d..cf2ec562550e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) | |||
169 | } | 169 | } |
170 | EXPORT_SYMBOL(ttm_bo_wait_unreserved); | 170 | EXPORT_SYMBOL(ttm_bo_wait_unreserved); |
171 | 171 | ||
172 | static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) | 172 | void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) |
173 | { | 173 | { |
174 | struct ttm_bo_device *bdev = bo->bdev; | 174 | struct ttm_bo_device *bdev = bo->bdev; |
175 | struct ttm_mem_type_manager *man; | 175 | struct ttm_mem_type_manager *man; |
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) | |||
191 | } | 191 | } |
192 | } | 192 | } |
193 | 193 | ||
194 | /** | 194 | int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) |
195 | * Call with the lru_lock held. | ||
196 | */ | ||
197 | |||
198 | static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) | ||
199 | { | 195 | { |
200 | int put_count = 0; | 196 | int put_count = 0; |
201 | 197 | ||
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
227 | /** | 223 | /** |
228 | * Deadlock avoidance for multi-bo reserving. | 224 | * Deadlock avoidance for multi-bo reserving. |
229 | */ | 225 | */ |
230 | if (use_sequence && bo->seq_valid && | 226 | if (use_sequence && bo->seq_valid) { |
231 | (sequence - bo->val_seq < (1 << 31))) { | 227 | /** |
232 | return -EAGAIN; | 228 | * We've already reserved this one. |
229 | */ | ||
230 | if (unlikely(sequence == bo->val_seq)) | ||
231 | return -EDEADLK; | ||
232 | /** | ||
233 | * Already reserved by a thread that will not back | ||
234 | * off for us. We need to back off. | ||
235 | */ | ||
236 | if (unlikely(sequence - bo->val_seq < (1 << 31))) | ||
237 | return -EAGAIN; | ||
233 | } | 238 | } |
234 | 239 | ||
235 | if (no_wait) | 240 | if (no_wait) |
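Editor's note: the reworked test in ttm_bo_reserve_locked() relies on serial-number arithmetic: with unsigned 32-bit sequences, sequence - bo->val_seq < (1 << 31) holds exactly when sequence is at or ahead of val_seq in modulo-2^32 order, so the comparison survives counter wraparound, while the new equality check catches a thread re-reserving with its own sequence and returns -EDEADLK. A self-contained illustration (the helper name is hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if a is at or ahead of b in modulo-2^32 order — the same
	 * test as (sequence - bo->val_seq < (1 << 31)). */
	static bool seq_at_or_after(uint32_t a, uint32_t b)
	{
		return (a - b) < (1u << 31);
	}

	/*
	 * seq_at_or_after(5, 3)          -> true   (5 is newer)
	 * seq_at_or_after(3, 5)          -> false
	 * seq_at_or_after(2, 0xFFFFFFF0) -> true   (newer across wraparound)
	 */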
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref) | |||
267 | BUG(); | 272 | BUG(); |
268 | } | 273 | } |
269 | 274 | ||
275 | void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, | ||
276 | bool never_free) | ||
277 | { | ||
278 | kref_sub(&bo->list_kref, count, | ||
279 | (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list); | ||
280 | } | ||
281 | |||
270 | int ttm_bo_reserve(struct ttm_buffer_object *bo, | 282 | int ttm_bo_reserve(struct ttm_buffer_object *bo, |
271 | bool interruptible, | 283 | bool interruptible, |
272 | bool no_wait, bool use_sequence, uint32_t sequence) | 284 | bool no_wait, bool use_sequence, uint32_t sequence) |
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, | |||
282 | put_count = ttm_bo_del_from_lru(bo); | 294 | put_count = ttm_bo_del_from_lru(bo); |
283 | spin_unlock(&glob->lru_lock); | 295 | spin_unlock(&glob->lru_lock); |
284 | 296 | ||
285 | while (put_count--) | 297 | ttm_bo_list_ref_sub(bo, put_count, true); |
286 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
287 | 298 | ||
288 | return ret; | 299 | return ret; |
289 | } | 300 | } |
290 | 301 | ||
302 | void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) | ||
303 | { | ||
304 | ttm_bo_add_to_lru(bo); | ||
305 | atomic_set(&bo->reserved, 0); | ||
306 | wake_up_all(&bo->event_queue); | ||
307 | } | ||
308 | |||
291 | void ttm_bo_unreserve(struct ttm_buffer_object *bo) | 309 | void ttm_bo_unreserve(struct ttm_buffer_object *bo) |
292 | { | 310 | { |
293 | struct ttm_bo_global *glob = bo->glob; | 311 | struct ttm_bo_global *glob = bo->glob; |
294 | 312 | ||
295 | spin_lock(&glob->lru_lock); | 313 | spin_lock(&glob->lru_lock); |
296 | ttm_bo_add_to_lru(bo); | 314 | ttm_bo_unreserve_locked(bo); |
297 | atomic_set(&bo->reserved, 0); | ||
298 | wake_up_all(&bo->event_queue); | ||
299 | spin_unlock(&glob->lru_lock); | 315 | spin_unlock(&glob->lru_lock); |
300 | } | 316 | } |
301 | EXPORT_SYMBOL(ttm_bo_unreserve); | 317 | EXPORT_SYMBOL(ttm_bo_unreserve); |
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
362 | int ret = 0; | 378 | int ret = 0; |
363 | 379 | ||
364 | if (old_is_pci || new_is_pci || | 380 | if (old_is_pci || new_is_pci || |
365 | ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) | 381 | ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { |
366 | ttm_bo_unmap_virtual(bo); | 382 | ret = ttm_mem_io_lock(old_man, true); |
383 | if (unlikely(ret != 0)) | ||
384 | goto out_err; | ||
385 | ttm_bo_unmap_virtual_locked(bo); | ||
386 | ttm_mem_io_unlock(old_man); | ||
387 | } | ||
367 | 388 | ||
368 | /* | 389 | /* |
369 | * Create and bind a ttm if required. | 390 | * Create and bind a ttm if required. |
@@ -416,11 +437,9 @@ moved: | |||
416 | } | 437 | } |
417 | 438 | ||
418 | if (bo->mem.mm_node) { | 439 | if (bo->mem.mm_node) { |
419 | spin_lock(&bo->lock); | ||
420 | bo->offset = (bo->mem.start << PAGE_SHIFT) + | 440 | bo->offset = (bo->mem.start << PAGE_SHIFT) + |
421 | bdev->man[bo->mem.mem_type].gpu_offset; | 441 | bdev->man[bo->mem.mem_type].gpu_offset; |
422 | bo->cur_placement = bo->mem.placement; | 442 | bo->cur_placement = bo->mem.placement; |
423 | spin_unlock(&bo->lock); | ||
424 | } else | 443 | } else |
425 | bo->offset = 0; | 444 | bo->offset = 0; |
426 | 445 | ||
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | |||
452 | ttm_tt_destroy(bo->ttm); | 471 | ttm_tt_destroy(bo->ttm); |
453 | bo->ttm = NULL; | 472 | bo->ttm = NULL; |
454 | } | 473 | } |
455 | |||
456 | ttm_bo_mem_put(bo, &bo->mem); | 474 | ttm_bo_mem_put(bo, &bo->mem); |
457 | 475 | ||
458 | atomic_set(&bo->reserved, 0); | 476 | atomic_set(&bo->reserved, 0); |
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
474 | int put_count; | 492 | int put_count; |
475 | int ret; | 493 | int ret; |
476 | 494 | ||
477 | spin_lock(&bo->lock); | 495 | spin_lock(&bdev->fence_lock); |
478 | (void) ttm_bo_wait(bo, false, false, true); | 496 | (void) ttm_bo_wait(bo, false, false, true); |
479 | if (!bo->sync_obj) { | 497 | if (!bo->sync_obj) { |
480 | 498 | ||
481 | spin_lock(&glob->lru_lock); | 499 | spin_lock(&glob->lru_lock); |
482 | 500 | ||
483 | /** | 501 | /** |
484 | * Lock inversion between bo::reserve and bo::lock here, | 502 | * Lock inversion between bo::reserve and bdev::fence_lock here, |
485 | * but that's OK, since we're only trylocking. | 503 | * but that's OK, since we're only trylocking. |
486 | */ | 504 | */ |
487 | 505 | ||
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
490 | if (unlikely(ret == -EBUSY)) | 508 | if (unlikely(ret == -EBUSY)) |
491 | goto queue; | 509 | goto queue; |
492 | 510 | ||
493 | spin_unlock(&bo->lock); | 511 | spin_unlock(&bdev->fence_lock); |
494 | put_count = ttm_bo_del_from_lru(bo); | 512 | put_count = ttm_bo_del_from_lru(bo); |
495 | 513 | ||
496 | spin_unlock(&glob->lru_lock); | 514 | spin_unlock(&glob->lru_lock); |
497 | ttm_bo_cleanup_memtype_use(bo); | 515 | ttm_bo_cleanup_memtype_use(bo); |
498 | 516 | ||
499 | while (put_count--) | 517 | ttm_bo_list_ref_sub(bo, put_count, true); |
500 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
501 | 518 | ||
502 | return; | 519 | return; |
503 | } else { | 520 | } else { |
@@ -512,7 +529,7 @@ queue: | |||
512 | kref_get(&bo->list_kref); | 529 | kref_get(&bo->list_kref); |
513 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); | 530 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
514 | spin_unlock(&glob->lru_lock); | 531 | spin_unlock(&glob->lru_lock); |
515 | spin_unlock(&bo->lock); | 532 | spin_unlock(&bdev->fence_lock); |
516 | 533 | ||
517 | if (sync_obj) { | 534 | if (sync_obj) { |
518 | driver->sync_obj_flush(sync_obj, sync_obj_arg); | 535 | driver->sync_obj_flush(sync_obj, sync_obj_arg); |
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | |||
537 | bool no_wait_reserve, | 554 | bool no_wait_reserve, |
538 | bool no_wait_gpu) | 555 | bool no_wait_gpu) |
539 | { | 556 | { |
557 | struct ttm_bo_device *bdev = bo->bdev; | ||
540 | struct ttm_bo_global *glob = bo->glob; | 558 | struct ttm_bo_global *glob = bo->glob; |
541 | int put_count; | 559 | int put_count; |
542 | int ret = 0; | 560 | int ret = 0; |
543 | 561 | ||
544 | retry: | 562 | retry: |
545 | spin_lock(&bo->lock); | 563 | spin_lock(&bdev->fence_lock); |
546 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); | 564 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
547 | spin_unlock(&bo->lock); | 565 | spin_unlock(&bdev->fence_lock); |
548 | 566 | ||
549 | if (unlikely(ret != 0)) | 567 | if (unlikely(ret != 0)) |
550 | return ret; | 568 | return ret; |
@@ -580,8 +598,7 @@ retry: | |||
580 | spin_unlock(&glob->lru_lock); | 598 | spin_unlock(&glob->lru_lock); |
581 | ttm_bo_cleanup_memtype_use(bo); | 599 | ttm_bo_cleanup_memtype_use(bo); |
582 | 600 | ||
583 | while (put_count--) | 601 | ttm_bo_list_ref_sub(bo, put_count, true); |
584 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
585 | 602 | ||
586 | return 0; | 603 | return 0; |
587 | } | 604 | } |
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref) | |||
652 | struct ttm_buffer_object *bo = | 669 | struct ttm_buffer_object *bo = |
653 | container_of(kref, struct ttm_buffer_object, kref); | 670 | container_of(kref, struct ttm_buffer_object, kref); |
654 | struct ttm_bo_device *bdev = bo->bdev; | 671 | struct ttm_bo_device *bdev = bo->bdev; |
672 | struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; | ||
655 | 673 | ||
656 | if (likely(bo->vm_node != NULL)) { | 674 | if (likely(bo->vm_node != NULL)) { |
657 | rb_erase(&bo->vm_rb, &bdev->addr_space_rb); | 675 | rb_erase(&bo->vm_rb, &bdev->addr_space_rb); |
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref) | |||
659 | bo->vm_node = NULL; | 677 | bo->vm_node = NULL; |
660 | } | 678 | } |
661 | write_unlock(&bdev->vm_lock); | 679 | write_unlock(&bdev->vm_lock); |
680 | ttm_mem_io_lock(man, false); | ||
681 | ttm_mem_io_free_vm(bo); | ||
682 | ttm_mem_io_unlock(man); | ||
662 | ttm_bo_cleanup_refs_or_queue(bo); | 683 | ttm_bo_cleanup_refs_or_queue(bo); |
663 | kref_put(&bo->list_kref, ttm_bo_release_list); | 684 | kref_put(&bo->list_kref, ttm_bo_release_list); |
664 | write_lock(&bdev->vm_lock); | 685 | write_lock(&bdev->vm_lock); |
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
698 | struct ttm_placement placement; | 719 | struct ttm_placement placement; |
699 | int ret = 0; | 720 | int ret = 0; |
700 | 721 | ||
701 | spin_lock(&bo->lock); | 722 | spin_lock(&bdev->fence_lock); |
702 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); | 723 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
703 | spin_unlock(&bo->lock); | 724 | spin_unlock(&bdev->fence_lock); |
704 | 725 | ||
705 | if (unlikely(ret != 0)) { | 726 | if (unlikely(ret != 0)) { |
706 | if (ret != -ERESTARTSYS) { | 727 | if (ret != -ERESTARTSYS) { |
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
715 | 736 | ||
716 | evict_mem = bo->mem; | 737 | evict_mem = bo->mem; |
717 | evict_mem.mm_node = NULL; | 738 | evict_mem.mm_node = NULL; |
718 | evict_mem.bus.io_reserved = false; | 739 | evict_mem.bus.io_reserved_vm = false; |
740 | evict_mem.bus.io_reserved_count = 0; | ||
719 | 741 | ||
720 | placement.fpfn = 0; | 742 | placement.fpfn = 0; |
721 | placement.lpfn = 0; | 743 | placement.lpfn = 0; |
@@ -802,8 +824,7 @@ retry: | |||
802 | 824 | ||
803 | BUG_ON(ret != 0); | 825 | BUG_ON(ret != 0); |
804 | 826 | ||
805 | while (put_count--) | 827 | ttm_bo_list_ref_sub(bo, put_count, true); |
806 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
807 | 828 | ||
808 | ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); | 829 | ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); |
809 | ttm_bo_unreserve(bo); | 830 | ttm_bo_unreserve(bo); |
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
1036 | { | 1057 | { |
1037 | int ret = 0; | 1058 | int ret = 0; |
1038 | struct ttm_mem_reg mem; | 1059 | struct ttm_mem_reg mem; |
1060 | struct ttm_bo_device *bdev = bo->bdev; | ||
1039 | 1061 | ||
1040 | BUG_ON(!atomic_read(&bo->reserved)); | 1062 | BUG_ON(!atomic_read(&bo->reserved)); |
1041 | 1063 | ||
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
1044 | * Have the driver move function wait for idle when necessary, | 1066 | * Have the driver move function wait for idle when necessary, |
1045 | * instead of doing it here. | 1067 | * instead of doing it here. |
1046 | */ | 1068 | */ |
1047 | spin_lock(&bo->lock); | 1069 | spin_lock(&bdev->fence_lock); |
1048 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); | 1070 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
1049 | spin_unlock(&bo->lock); | 1071 | spin_unlock(&bdev->fence_lock); |
1050 | if (ret) | 1072 | if (ret) |
1051 | return ret; | 1073 | return ret; |
1052 | mem.num_pages = bo->num_pages; | 1074 | mem.num_pages = bo->num_pages; |
1053 | mem.size = mem.num_pages << PAGE_SHIFT; | 1075 | mem.size = mem.num_pages << PAGE_SHIFT; |
1054 | mem.page_alignment = bo->mem.page_alignment; | 1076 | mem.page_alignment = bo->mem.page_alignment; |
1055 | mem.bus.io_reserved = false; | 1077 | mem.bus.io_reserved_vm = false; |
1078 | mem.bus.io_reserved_count = 0; | ||
1056 | /* | 1079 | /* |
1057 | * Determine where to move the buffer. | 1080 | * Determine where to move the buffer. |
1058 | */ | 1081 | */ |
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1163 | } | 1186 | } |
1164 | bo->destroy = destroy; | 1187 | bo->destroy = destroy; |
1165 | 1188 | ||
1166 | spin_lock_init(&bo->lock); | ||
1167 | kref_init(&bo->kref); | 1189 | kref_init(&bo->kref); |
1168 | kref_init(&bo->list_kref); | 1190 | kref_init(&bo->list_kref); |
1169 | atomic_set(&bo->cpu_writers, 0); | 1191 | atomic_set(&bo->cpu_writers, 0); |
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1172 | INIT_LIST_HEAD(&bo->lru); | 1194 | INIT_LIST_HEAD(&bo->lru); |
1173 | INIT_LIST_HEAD(&bo->ddestroy); | 1195 | INIT_LIST_HEAD(&bo->ddestroy); |
1174 | INIT_LIST_HEAD(&bo->swap); | 1196 | INIT_LIST_HEAD(&bo->swap); |
1197 | INIT_LIST_HEAD(&bo->io_reserve_lru); | ||
1175 | bo->bdev = bdev; | 1198 | bo->bdev = bdev; |
1176 | bo->glob = bdev->glob; | 1199 | bo->glob = bdev->glob; |
1177 | bo->type = type; | 1200 | bo->type = type; |
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1181 | bo->mem.num_pages = bo->num_pages; | 1204 | bo->mem.num_pages = bo->num_pages; |
1182 | bo->mem.mm_node = NULL; | 1205 | bo->mem.mm_node = NULL; |
1183 | bo->mem.page_alignment = page_alignment; | 1206 | bo->mem.page_alignment = page_alignment; |
1184 | bo->mem.bus.io_reserved = false; | 1207 | bo->mem.bus.io_reserved_vm = false; |
1208 | bo->mem.bus.io_reserved_count = 0; | ||
1185 | bo->buffer_start = buffer_start & PAGE_MASK; | 1209 | bo->buffer_start = buffer_start & PAGE_MASK; |
1186 | bo->priv_flags = 0; | 1210 | bo->priv_flags = 0; |
1187 | bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); | 1211 | bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); |
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1355 | BUG_ON(type >= TTM_NUM_MEM_TYPES); | 1379 | BUG_ON(type >= TTM_NUM_MEM_TYPES); |
1356 | man = &bdev->man[type]; | 1380 | man = &bdev->man[type]; |
1357 | BUG_ON(man->has_type); | 1381 | BUG_ON(man->has_type); |
1382 | man->io_reserve_fastpath = true; | ||
1383 | man->use_io_reserve_lru = false; | ||
1384 | mutex_init(&man->io_reserve_mutex); | ||
1385 | INIT_LIST_HEAD(&man->io_reserve_lru); | ||
1358 | 1386 | ||
1359 | ret = bdev->driver->init_mem_type(bdev, type, man); | 1387 | ret = bdev->driver->init_mem_type(bdev, type, man); |
1360 | if (ret) | 1388 | if (ret) |
@@ -1527,7 +1555,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1527 | bdev->dev_mapping = NULL; | 1555 | bdev->dev_mapping = NULL; |
1528 | bdev->glob = glob; | 1556 | bdev->glob = glob; |
1529 | bdev->need_dma32 = need_dma32; | 1557 | bdev->need_dma32 = need_dma32; |
1530 | 1558 | bdev->val_seq = 0; | |
1559 | spin_lock_init(&bdev->fence_lock); | ||
1531 | mutex_lock(&glob->device_list_mutex); | 1560 | mutex_lock(&glob->device_list_mutex); |
1532 | list_add_tail(&bdev->device_list, &glob->device_list); | 1561 | list_add_tail(&bdev->device_list, &glob->device_list); |
1533 | mutex_unlock(&glob->device_list_mutex); | 1562 | mutex_unlock(&glob->device_list_mutex); |
@@ -1561,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
1561 | return true; | 1590 | return true; |
1562 | } | 1591 | } |
1563 | 1592 | ||
1564 | void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | 1593 | void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) |
1565 | { | 1594 | { |
1566 | struct ttm_bo_device *bdev = bo->bdev; | 1595 | struct ttm_bo_device *bdev = bo->bdev; |
1567 | loff_t offset = (loff_t) bo->addr_space_offset; | 1596 | loff_t offset = (loff_t) bo->addr_space_offset; |
@@ -1570,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | |||
1570 | if (!bdev->dev_mapping) | 1599 | if (!bdev->dev_mapping) |
1571 | return; | 1600 | return; |
1572 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); | 1601 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); |
1573 | ttm_mem_io_free(bdev, &bo->mem); | 1602 | ttm_mem_io_free_vm(bo); |
1603 | } | ||
1604 | |||
1605 | void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | ||
1606 | { | ||
1607 | struct ttm_bo_device *bdev = bo->bdev; | ||
1608 | struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; | ||
1609 | |||
1610 | ttm_mem_io_lock(man, false); | ||
1611 | ttm_bo_unmap_virtual_locked(bo); | ||
1612 | ttm_mem_io_unlock(man); | ||
1574 | } | 1613 | } |
1614 | |||
1615 | |||
1575 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); | 1616 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); |
1576 | 1617 | ||
1577 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) | 1618 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) |
@@ -1651,6 +1692,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1651 | bool lazy, bool interruptible, bool no_wait) | 1692 | bool lazy, bool interruptible, bool no_wait) |
1652 | { | 1693 | { |
1653 | struct ttm_bo_driver *driver = bo->bdev->driver; | 1694 | struct ttm_bo_driver *driver = bo->bdev->driver; |
1695 | struct ttm_bo_device *bdev = bo->bdev; | ||
1654 | void *sync_obj; | 1696 | void *sync_obj; |
1655 | void *sync_obj_arg; | 1697 | void *sync_obj_arg; |
1656 | int ret = 0; | 1698 | int ret = 0; |
@@ -1664,9 +1706,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1664 | void *tmp_obj = bo->sync_obj; | 1706 | void *tmp_obj = bo->sync_obj; |
1665 | bo->sync_obj = NULL; | 1707 | bo->sync_obj = NULL; |
1666 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 1708 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
1667 | spin_unlock(&bo->lock); | 1709 | spin_unlock(&bdev->fence_lock); |
1668 | driver->sync_obj_unref(&tmp_obj); | 1710 | driver->sync_obj_unref(&tmp_obj); |
1669 | spin_lock(&bo->lock); | 1711 | spin_lock(&bdev->fence_lock); |
1670 | continue; | 1712 | continue; |
1671 | } | 1713 | } |
1672 | 1714 | ||
@@ -1675,29 +1717,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1675 | 1717 | ||
1676 | sync_obj = driver->sync_obj_ref(bo->sync_obj); | 1718 | sync_obj = driver->sync_obj_ref(bo->sync_obj); |
1677 | sync_obj_arg = bo->sync_obj_arg; | 1719 | sync_obj_arg = bo->sync_obj_arg; |
1678 | spin_unlock(&bo->lock); | 1720 | spin_unlock(&bdev->fence_lock); |
1679 | ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, | 1721 | ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, |
1680 | lazy, interruptible); | 1722 | lazy, interruptible); |
1681 | if (unlikely(ret != 0)) { | 1723 | if (unlikely(ret != 0)) { |
1682 | driver->sync_obj_unref(&sync_obj); | 1724 | driver->sync_obj_unref(&sync_obj); |
1683 | spin_lock(&bo->lock); | 1725 | spin_lock(&bdev->fence_lock); |
1684 | return ret; | 1726 | return ret; |
1685 | } | 1727 | } |
1686 | spin_lock(&bo->lock); | 1728 | spin_lock(&bdev->fence_lock); |
1687 | if (likely(bo->sync_obj == sync_obj && | 1729 | if (likely(bo->sync_obj == sync_obj && |
1688 | bo->sync_obj_arg == sync_obj_arg)) { | 1730 | bo->sync_obj_arg == sync_obj_arg)) { |
1689 | void *tmp_obj = bo->sync_obj; | 1731 | void *tmp_obj = bo->sync_obj; |
1690 | bo->sync_obj = NULL; | 1732 | bo->sync_obj = NULL; |
1691 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, | 1733 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, |
1692 | &bo->priv_flags); | 1734 | &bo->priv_flags); |
1693 | spin_unlock(&bo->lock); | 1735 | spin_unlock(&bdev->fence_lock); |
1694 | driver->sync_obj_unref(&sync_obj); | 1736 | driver->sync_obj_unref(&sync_obj); |
1695 | driver->sync_obj_unref(&tmp_obj); | 1737 | driver->sync_obj_unref(&tmp_obj); |
1696 | spin_lock(&bo->lock); | 1738 | spin_lock(&bdev->fence_lock); |
1697 | } else { | 1739 | } else { |
1698 | spin_unlock(&bo->lock); | 1740 | spin_unlock(&bdev->fence_lock); |
1699 | driver->sync_obj_unref(&sync_obj); | 1741 | driver->sync_obj_unref(&sync_obj); |
1700 | spin_lock(&bo->lock); | 1742 | spin_lock(&bdev->fence_lock); |
1701 | } | 1743 | } |
1702 | } | 1744 | } |
1703 | return 0; | 1745 | return 0; |
@@ -1706,6 +1748,7 @@ EXPORT_SYMBOL(ttm_bo_wait); | |||
1706 | 1748 | ||
1707 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) | 1749 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) |
1708 | { | 1750 | { |
1751 | struct ttm_bo_device *bdev = bo->bdev; | ||
1709 | int ret = 0; | 1752 | int ret = 0; |
1710 | 1753 | ||
1711 | /* | 1754 | /* |
@@ -1715,9 +1758,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) | |||
1715 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); | 1758 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); |
1716 | if (unlikely(ret != 0)) | 1759 | if (unlikely(ret != 0)) |
1717 | return ret; | 1760 | return ret; |
1718 | spin_lock(&bo->lock); | 1761 | spin_lock(&bdev->fence_lock); |
1719 | ret = ttm_bo_wait(bo, false, true, no_wait); | 1762 | ret = ttm_bo_wait(bo, false, true, no_wait); |
1720 | spin_unlock(&bo->lock); | 1763 | spin_unlock(&bdev->fence_lock); |
1721 | if (likely(ret == 0)) | 1764 | if (likely(ret == 0)) |
1722 | atomic_inc(&bo->cpu_writers); | 1765 | atomic_inc(&bo->cpu_writers); |
1723 | ttm_bo_unreserve(bo); | 1766 | ttm_bo_unreserve(bo); |
@@ -1783,16 +1826,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1783 | put_count = ttm_bo_del_from_lru(bo); | 1826 | put_count = ttm_bo_del_from_lru(bo); |
1784 | spin_unlock(&glob->lru_lock); | 1827 | spin_unlock(&glob->lru_lock); |
1785 | 1828 | ||
1786 | while (put_count--) | 1829 | ttm_bo_list_ref_sub(bo, put_count, true); |
1787 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
1788 | 1830 | ||
1789 | /** | 1831 | /** |
1790 | * Wait for GPU, then move to system cached. | 1832 | * Wait for GPU, then move to system cached. |
1791 | */ | 1833 | */ |
1792 | 1834 | ||
1793 | spin_lock(&bo->lock); | 1835 | spin_lock(&bo->bdev->fence_lock); |
1794 | ret = ttm_bo_wait(bo, false, false, false); | 1836 | ret = ttm_bo_wait(bo, false, false, false); |
1795 | spin_unlock(&bo->lock); | 1837 | spin_unlock(&bo->bdev->fence_lock); |
1796 | 1838 | ||
1797 | if (unlikely(ret != 0)) | 1839 | if (unlikely(ret != 0)) |
1798 | goto out; | 1840 | goto out; |
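
Editor's note: throughout this file the per-object bo->lock is replaced by a single per-device bdev->fence_lock guarding all fence/sync_obj state, and its initialization moves from ttm_bo_init into ttm_bo_device_init. A hedged user-space model of the ownership change, with pthreads standing in for kernel spinlocks (this is not the kernel implementation):

    #include <pthread.h>
    #include <stdio.h>

    /* One lock per device now covers the fence state of every buffer
     * object that device owns, instead of one lock per object. */
    struct bo_device { pthread_mutex_t fence_lock; };
    struct buffer_object { struct bo_device *bdev; void *sync_obj; };

    static void *bo_peek_fence(struct buffer_object *bo)
    {
            void *obj;
            pthread_mutex_lock(&bo->bdev->fence_lock);   /* was: &bo->lock */
            obj = bo->sync_obj;
            pthread_mutex_unlock(&bo->bdev->fence_lock);
            return obj;
    }

    int main(void)
    {
            struct bo_device dev = { PTHREAD_MUTEX_INITIALIZER };
            struct buffer_object bo = { &dev, NULL };
            printf("%p\n", bo_peek_fence(&bo));
            return 0;
    }
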
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3106d5bcce32..77dbf408c0d0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, | |||
75 | } | 75 | } |
76 | EXPORT_SYMBOL(ttm_bo_move_ttm); | 76 | EXPORT_SYMBOL(ttm_bo_move_ttm); |
77 | 77 | ||
78 | int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 78 | int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) |
79 | { | 79 | { |
80 | int ret; | 80 | if (likely(man->io_reserve_fastpath)) |
81 | return 0; | ||
82 | |||
83 | if (interruptible) | ||
84 | return mutex_lock_interruptible(&man->io_reserve_mutex); | ||
85 | |||
86 | mutex_lock(&man->io_reserve_mutex); | ||
87 | return 0; | ||
88 | } | ||
81 | 89 | ||
82 | if (!mem->bus.io_reserved) { | 90 | void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) |
83 | mem->bus.io_reserved = true; | 91 | { |
92 | if (likely(man->io_reserve_fastpath)) | ||
93 | return; | ||
94 | |||
95 | mutex_unlock(&man->io_reserve_mutex); | ||
96 | } | ||
97 | |||
98 | static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) | ||
99 | { | ||
100 | struct ttm_buffer_object *bo; | ||
101 | |||
102 | if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru)) | ||
103 | return -EAGAIN; | ||
104 | |||
105 | bo = list_first_entry(&man->io_reserve_lru, | ||
106 | struct ttm_buffer_object, | ||
107 | io_reserve_lru); | ||
108 | list_del_init(&bo->io_reserve_lru); | ||
109 | ttm_bo_unmap_virtual_locked(bo); | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int ttm_mem_io_reserve(struct ttm_bo_device *bdev, | ||
115 | struct ttm_mem_reg *mem) | ||
116 | { | ||
117 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
118 | int ret = 0; | ||
119 | |||
120 | if (!bdev->driver->io_mem_reserve) | ||
121 | return 0; | ||
122 | if (likely(man->io_reserve_fastpath)) | ||
123 | return bdev->driver->io_mem_reserve(bdev, mem); | ||
124 | |||
125 | if (bdev->driver->io_mem_reserve && | ||
126 | mem->bus.io_reserved_count++ == 0) { | ||
127 | retry: | ||
84 | ret = bdev->driver->io_mem_reserve(bdev, mem); | 128 | ret = bdev->driver->io_mem_reserve(bdev, mem); |
129 | if (ret == -EAGAIN) { | ||
130 | ret = ttm_mem_io_evict(man); | ||
131 | if (ret == 0) | ||
132 | goto retry; | ||
133 | } | ||
134 | } | ||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | static void ttm_mem_io_free(struct ttm_bo_device *bdev, | ||
139 | struct ttm_mem_reg *mem) | ||
140 | { | ||
141 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
142 | |||
143 | if (likely(man->io_reserve_fastpath)) | ||
144 | return; | ||
145 | |||
146 | if (bdev->driver->io_mem_reserve && | ||
147 | --mem->bus.io_reserved_count == 0 && | ||
148 | bdev->driver->io_mem_free) | ||
149 | bdev->driver->io_mem_free(bdev, mem); | ||
150 | |||
151 | } | ||
152 | |||
153 | int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) | ||
154 | { | ||
155 | struct ttm_mem_reg *mem = &bo->mem; | ||
156 | int ret; | ||
157 | |||
158 | if (!mem->bus.io_reserved_vm) { | ||
159 | struct ttm_mem_type_manager *man = | ||
160 | &bo->bdev->man[mem->mem_type]; | ||
161 | |||
162 | ret = ttm_mem_io_reserve(bo->bdev, mem); | ||
85 | if (unlikely(ret != 0)) | 163 | if (unlikely(ret != 0)) |
86 | return ret; | 164 | return ret; |
165 | mem->bus.io_reserved_vm = true; | ||
166 | if (man->use_io_reserve_lru) | ||
167 | list_add_tail(&bo->io_reserve_lru, | ||
168 | &man->io_reserve_lru); | ||
87 | } | 169 | } |
88 | return 0; | 170 | return 0; |
89 | } | 171 | } |
90 | 172 | ||
91 | void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 173 | void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) |
92 | { | 174 | { |
93 | if (bdev->driver->io_mem_reserve) { | 175 | struct ttm_mem_reg *mem = &bo->mem; |
94 | if (mem->bus.io_reserved) { | 176 | |
95 | mem->bus.io_reserved = false; | 177 | if (mem->bus.io_reserved_vm) { |
96 | bdev->driver->io_mem_free(bdev, mem); | 178 | mem->bus.io_reserved_vm = false; |
97 | } | 179 | list_del_init(&bo->io_reserve_lru); |
180 | ttm_mem_io_free(bo->bdev, mem); | ||
98 | } | 181 | } |
99 | } | 182 | } |
100 | 183 | ||
101 | int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | 184 | int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
102 | void **virtual) | 185 | void **virtual) |
103 | { | 186 | { |
187 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
104 | int ret; | 188 | int ret; |
105 | void *addr; | 189 | void *addr; |
106 | 190 | ||
107 | *virtual = NULL; | 191 | *virtual = NULL; |
192 | (void) ttm_mem_io_lock(man, false); | ||
108 | ret = ttm_mem_io_reserve(bdev, mem); | 193 | ret = ttm_mem_io_reserve(bdev, mem); |
194 | ttm_mem_io_unlock(man); | ||
109 | if (ret || !mem->bus.is_iomem) | 195 | if (ret || !mem->bus.is_iomem) |
110 | return ret; | 196 | return ret; |
111 | 197 | ||
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | |||
117 | else | 203 | else |
118 | addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); | 204 | addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); |
119 | if (!addr) { | 205 | if (!addr) { |
206 | (void) ttm_mem_io_lock(man, false); | ||
120 | ttm_mem_io_free(bdev, mem); | 207 | ttm_mem_io_free(bdev, mem); |
208 | ttm_mem_io_unlock(man); | ||
121 | return -ENOMEM; | 209 | return -ENOMEM; |
122 | } | 210 | } |
123 | } | 211 | } |
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | |||
134 | 222 | ||
135 | if (virtual && mem->bus.addr == NULL) | 223 | if (virtual && mem->bus.addr == NULL) |
136 | iounmap(virtual); | 224 | iounmap(virtual); |
225 | (void) ttm_mem_io_lock(man, false); | ||
137 | ttm_mem_io_free(bdev, mem); | 226 | ttm_mem_io_free(bdev, mem); |
227 | ttm_mem_io_unlock(man); | ||
138 | } | 228 | } |
139 | 229 | ||
140 | static int ttm_copy_io_page(void *dst, void *src, unsigned long page) | 230 | static int ttm_copy_io_page(void *dst, void *src, unsigned long page) |
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
231 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; | 321 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; |
232 | struct ttm_tt *ttm = bo->ttm; | 322 | struct ttm_tt *ttm = bo->ttm; |
233 | struct ttm_mem_reg *old_mem = &bo->mem; | 323 | struct ttm_mem_reg *old_mem = &bo->mem; |
234 | struct ttm_mem_reg old_copy = *old_mem; | 324 | struct ttm_mem_reg old_copy; |
235 | void *old_iomap; | 325 | void *old_iomap; |
236 | void *new_iomap; | 326 | void *new_iomap; |
237 | int ret; | 327 | int ret; |
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
280 | } | 370 | } |
281 | mb(); | 371 | mb(); |
282 | out2: | 372 | out2: |
283 | ttm_bo_free_old_node(bo); | 373 | old_copy = *old_mem; |
284 | |||
285 | *old_mem = *new_mem; | 374 | *old_mem = *new_mem; |
286 | new_mem->mm_node = NULL; | 375 | new_mem->mm_node = NULL; |
287 | 376 | ||
@@ -292,9 +381,10 @@ out2: | |||
292 | } | 381 | } |
293 | 382 | ||
294 | out1: | 383 | out1: |
295 | ttm_mem_reg_iounmap(bdev, new_mem, new_iomap); | 384 | ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); |
296 | out: | 385 | out: |
297 | ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); | 386 | ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); |
387 | ttm_bo_mem_put(bo, &old_copy); | ||
298 | return ret; | 388 | return ret; |
299 | } | 389 | } |
300 | EXPORT_SYMBOL(ttm_bo_move_memcpy); | 390 | EXPORT_SYMBOL(ttm_bo_move_memcpy); |
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
337 | * TODO: Explicit member copy would probably be better here. | 427 | * TODO: Explicit member copy would probably be better here. |
338 | */ | 428 | */ |
339 | 429 | ||
340 | spin_lock_init(&fbo->lock); | ||
341 | init_waitqueue_head(&fbo->event_queue); | 430 | init_waitqueue_head(&fbo->event_queue); |
342 | INIT_LIST_HEAD(&fbo->ddestroy); | 431 | INIT_LIST_HEAD(&fbo->ddestroy); |
343 | INIT_LIST_HEAD(&fbo->lru); | 432 | INIT_LIST_HEAD(&fbo->lru); |
344 | INIT_LIST_HEAD(&fbo->swap); | 433 | INIT_LIST_HEAD(&fbo->swap); |
434 | INIT_LIST_HEAD(&fbo->io_reserve_lru); | ||
345 | fbo->vm_node = NULL; | 435 | fbo->vm_node = NULL; |
346 | atomic_set(&fbo->cpu_writers, 0); | 436 | atomic_set(&fbo->cpu_writers, 0); |
347 | 437 | ||
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, | |||
453 | unsigned long start_page, unsigned long num_pages, | 543 | unsigned long start_page, unsigned long num_pages, |
454 | struct ttm_bo_kmap_obj *map) | 544 | struct ttm_bo_kmap_obj *map) |
455 | { | 545 | { |
546 | struct ttm_mem_type_manager *man = | ||
547 | &bo->bdev->man[bo->mem.mem_type]; | ||
456 | unsigned long offset, size; | 548 | unsigned long offset, size; |
457 | int ret; | 549 | int ret; |
458 | 550 | ||
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, | |||
467 | if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) | 559 | if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) |
468 | return -EPERM; | 560 | return -EPERM; |
469 | #endif | 561 | #endif |
562 | (void) ttm_mem_io_lock(man, false); | ||
470 | ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); | 563 | ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); |
564 | ttm_mem_io_unlock(man); | ||
471 | if (ret) | 565 | if (ret) |
472 | return ret; | 566 | return ret; |
473 | if (!bo->mem.bus.is_iomem) { | 567 | if (!bo->mem.bus.is_iomem) { |
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap); | |||
482 | 576 | ||
483 | void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) | 577 | void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) |
484 | { | 578 | { |
579 | struct ttm_buffer_object *bo = map->bo; | ||
580 | struct ttm_mem_type_manager *man = | ||
581 | &bo->bdev->man[bo->mem.mem_type]; | ||
582 | |||
485 | if (!map->virtual) | 583 | if (!map->virtual) |
486 | return; | 584 | return; |
487 | switch (map->bo_kmap_type) { | 585 | switch (map->bo_kmap_type) { |
488 | case ttm_bo_map_iomap: | 586 | case ttm_bo_map_iomap: |
489 | iounmap(map->virtual); | 587 | iounmap(map->virtual); |
490 | ttm_mem_io_free(map->bo->bdev, &map->bo->mem); | ||
491 | break; | 588 | break; |
492 | case ttm_bo_map_vmap: | 589 | case ttm_bo_map_vmap: |
493 | vunmap(map->virtual); | 590 | vunmap(map->virtual); |
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) | |||
500 | default: | 597 | default: |
501 | BUG(); | 598 | BUG(); |
502 | } | 599 | } |
600 | (void) ttm_mem_io_lock(man, false); | ||
601 | ttm_mem_io_free(map->bo->bdev, &map->bo->mem); | ||
602 | ttm_mem_io_unlock(man); | ||
503 | map->virtual = NULL; | 603 | map->virtual = NULL; |
504 | map->page = NULL; | 604 | map->page = NULL; |
505 | } | 605 | } |
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
520 | struct ttm_buffer_object *ghost_obj; | 620 | struct ttm_buffer_object *ghost_obj; |
521 | void *tmp_obj = NULL; | 621 | void *tmp_obj = NULL; |
522 | 622 | ||
523 | spin_lock(&bo->lock); | 623 | spin_lock(&bdev->fence_lock); |
524 | if (bo->sync_obj) { | 624 | if (bo->sync_obj) { |
525 | tmp_obj = bo->sync_obj; | 625 | tmp_obj = bo->sync_obj; |
526 | bo->sync_obj = NULL; | 626 | bo->sync_obj = NULL; |
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
529 | bo->sync_obj_arg = sync_obj_arg; | 629 | bo->sync_obj_arg = sync_obj_arg; |
530 | if (evict) { | 630 | if (evict) { |
531 | ret = ttm_bo_wait(bo, false, false, false); | 631 | ret = ttm_bo_wait(bo, false, false, false); |
532 | spin_unlock(&bo->lock); | 632 | spin_unlock(&bdev->fence_lock); |
533 | if (tmp_obj) | 633 | if (tmp_obj) |
534 | driver->sync_obj_unref(&tmp_obj); | 634 | driver->sync_obj_unref(&tmp_obj); |
535 | if (ret) | 635 | if (ret) |
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
552 | */ | 652 | */ |
553 | 653 | ||
554 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 654 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
555 | spin_unlock(&bo->lock); | 655 | spin_unlock(&bdev->fence_lock); |
556 | if (tmp_obj) | 656 | if (tmp_obj) |
557 | driver->sync_obj_unref(&tmp_obj); | 657 | driver->sync_obj_unref(&tmp_obj); |
558 | 658 | ||
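
Editor's note: ttm_mem_io_reserve/ttm_mem_io_free now refcount io-space reservations per memory region. The driver's io_mem_reserve hook runs only on the first reservation, io_mem_free only when the last user drops out, and ttm_mem_io_evict reclaims LRU entries when the driver reports -EAGAIN. A minimal model of the refcounting alone (field names mirror the patch; this is not the kernel code):

    #include <stdio.h>

    /* Hedged model: the driver hooks fire only on the 0 -> 1
     * reservation and the 1 -> 0 release. */
    struct mem { int io_reserved_count; };

    static int io_reserve(struct mem *m)
    {
            if (m->io_reserved_count++ == 0)
                    printf("driver->io_mem_reserve()\n"); /* first user maps */
            return 0;
    }

    static void io_free(struct mem *m)
    {
            if (--m->io_reserved_count == 0)
                    printf("driver->io_mem_free()\n");    /* last user unmaps */
    }

    int main(void)
    {
            struct mem m = { 0 };
            io_reserve(&m);  /* maps the aperture   */
            io_reserve(&m);  /* nested, no driver call */
            io_free(&m);
            io_free(&m);     /* tears it down */
            return 0;
    }
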
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe6cb77899f4..221b924acebe 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
83 | int i; | 83 | int i; |
84 | unsigned long address = (unsigned long)vmf->virtual_address; | 84 | unsigned long address = (unsigned long)vmf->virtual_address; |
85 | int retval = VM_FAULT_NOPAGE; | 85 | int retval = VM_FAULT_NOPAGE; |
86 | struct ttm_mem_type_manager *man = | ||
87 | &bdev->man[bo->mem.mem_type]; | ||
86 | 88 | ||
87 | /* | 89 | /* |
88 | * Work around locking order reversal in fault / nopfn | 90 | * Work around locking order reversal in fault / nopfn |
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
118 | * move. | 120 | * move. |
119 | */ | 121 | */ |
120 | 122 | ||
121 | spin_lock(&bo->lock); | 123 | spin_lock(&bdev->fence_lock); |
122 | if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { | 124 | if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { |
123 | ret = ttm_bo_wait(bo, false, true, false); | 125 | ret = ttm_bo_wait(bo, false, true, false); |
124 | spin_unlock(&bo->lock); | 126 | spin_unlock(&bdev->fence_lock); |
125 | if (unlikely(ret != 0)) { | 127 | if (unlikely(ret != 0)) { |
126 | retval = (ret != -ERESTARTSYS) ? | 128 | retval = (ret != -ERESTARTSYS) ? |
127 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; | 129 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; |
128 | goto out_unlock; | 130 | goto out_unlock; |
129 | } | 131 | } |
130 | } else | 132 | } else |
131 | spin_unlock(&bo->lock); | 133 | spin_unlock(&bdev->fence_lock); |
132 | 134 | ||
133 | 135 | ret = ttm_mem_io_lock(man, true); | |
134 | ret = ttm_mem_io_reserve(bdev, &bo->mem); | 136 | if (unlikely(ret != 0)) { |
135 | if (ret) { | 137 | retval = VM_FAULT_NOPAGE; |
136 | retval = VM_FAULT_SIGBUS; | ||
137 | goto out_unlock; | 138 | goto out_unlock; |
138 | } | 139 | } |
140 | ret = ttm_mem_io_reserve_vm(bo); | ||
141 | if (unlikely(ret != 0)) { | ||
142 | retval = VM_FAULT_SIGBUS; | ||
143 | goto out_io_unlock; | ||
144 | } | ||
139 | 145 | ||
140 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + | 146 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + |
141 | bo->vm_node->start - vma->vm_pgoff; | 147 | bo->vm_node->start - vma->vm_pgoff; |
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
144 | 150 | ||
145 | if (unlikely(page_offset >= bo->num_pages)) { | 151 | if (unlikely(page_offset >= bo->num_pages)) { |
146 | retval = VM_FAULT_SIGBUS; | 152 | retval = VM_FAULT_SIGBUS; |
147 | goto out_unlock; | 153 | goto out_io_unlock; |
148 | } | 154 | } |
149 | 155 | ||
150 | /* | 156 | /* |
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
182 | page = ttm_tt_get_page(ttm, page_offset); | 188 | page = ttm_tt_get_page(ttm, page_offset); |
183 | if (unlikely(!page && i == 0)) { | 189 | if (unlikely(!page && i == 0)) { |
184 | retval = VM_FAULT_OOM; | 190 | retval = VM_FAULT_OOM; |
185 | goto out_unlock; | 191 | goto out_io_unlock; |
186 | } else if (unlikely(!page)) { | 192 | } else if (unlikely(!page)) { |
187 | break; | 193 | break; |
188 | } | 194 | } |
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
200 | else if (unlikely(ret != 0)) { | 206 | else if (unlikely(ret != 0)) { |
201 | retval = | 207 | retval = |
202 | (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; | 208 | (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; |
203 | goto out_unlock; | 209 | goto out_io_unlock; |
204 | } | 210 | } |
205 | 211 | ||
206 | address += PAGE_SIZE; | 212 | address += PAGE_SIZE; |
207 | if (unlikely(++page_offset >= page_last)) | 213 | if (unlikely(++page_offset >= page_last)) |
208 | break; | 214 | break; |
209 | } | 215 | } |
210 | 216 | out_io_unlock: | |
217 | ttm_mem_io_unlock(man); | ||
211 | out_unlock: | 218 | out_unlock: |
212 | ttm_bo_unreserve(bo); | 219 | ttm_bo_unreserve(bo); |
213 | return retval; | 220 | return retval; |
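
Editor's note: the fault handler now nests the io-reserve lock inside the bo reservation and unwinds through the new out_io_unlock label, so the lock taken last is dropped first. A stubbed sketch of that unwind shape (all helpers here are hypothetical stand-ins):

    #include <stdio.h>

    static int  io_lock_stub(void)    { return 0; }
    static void io_unlock_stub(void)  { }
    static void unreserve_stub(void)  { }
    static int  walk_pages_stub(void) { return -1; /* pretend the walk failed */ }

    static int fault_path(void)
    {
            int ret = io_lock_stub();       /* taken after the bo reservation */
            if (ret)
                    goto out_unlock;        /* io lock not held: skip its unlock */
            ret = walk_pages_stub();
            if (ret)
                    goto out_io_unlock;     /* errors under the io lock land here */
    out_io_unlock:
            io_unlock_stub();               /* drop the io lock first... */
    out_unlock:
            unreserve_stub();               /* ...then the reservation */
            return ret;
    }

    int main(void)
    {
            printf("%d\n", fault_path());
            return 0;
    }
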
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index c285c2902d15..3832fe10b4df 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | 34 | ||
35 | void ttm_eu_backoff_reservation(struct list_head *list) | 35 | static void ttm_eu_backoff_reservation_locked(struct list_head *list) |
36 | { | 36 | { |
37 | struct ttm_validate_buffer *entry; | 37 | struct ttm_validate_buffer *entry; |
38 | 38 | ||
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list) | |||
41 | if (!entry->reserved) | 41 | if (!entry->reserved) |
42 | continue; | 42 | continue; |
43 | 43 | ||
44 | if (entry->removed) { | ||
45 | ttm_bo_add_to_lru(bo); | ||
46 | entry->removed = false; | ||
47 | |||
48 | } | ||
44 | entry->reserved = false; | 49 | entry->reserved = false; |
45 | ttm_bo_unreserve(bo); | 50 | atomic_set(&bo->reserved, 0); |
51 | wake_up_all(&bo->event_queue); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | static void ttm_eu_del_from_lru_locked(struct list_head *list) | ||
56 | { | ||
57 | struct ttm_validate_buffer *entry; | ||
58 | |||
59 | list_for_each_entry(entry, list, head) { | ||
60 | struct ttm_buffer_object *bo = entry->bo; | ||
61 | if (!entry->reserved) | ||
62 | continue; | ||
63 | |||
64 | if (!entry->removed) { | ||
65 | entry->put_count = ttm_bo_del_from_lru(bo); | ||
66 | entry->removed = true; | ||
67 | } | ||
46 | } | 68 | } |
47 | } | 69 | } |
70 | |||
71 | static void ttm_eu_list_ref_sub(struct list_head *list) | ||
72 | { | ||
73 | struct ttm_validate_buffer *entry; | ||
74 | |||
75 | list_for_each_entry(entry, list, head) { | ||
76 | struct ttm_buffer_object *bo = entry->bo; | ||
77 | |||
78 | if (entry->put_count) { | ||
79 | ttm_bo_list_ref_sub(bo, entry->put_count, true); | ||
80 | entry->put_count = 0; | ||
81 | } | ||
82 | } | ||
83 | } | ||
84 | |||
85 | static int ttm_eu_wait_unreserved_locked(struct list_head *list, | ||
86 | struct ttm_buffer_object *bo) | ||
87 | { | ||
88 | struct ttm_bo_global *glob = bo->glob; | ||
89 | int ret; | ||
90 | |||
91 | ttm_eu_del_from_lru_locked(list); | ||
92 | spin_unlock(&glob->lru_lock); | ||
93 | ret = ttm_bo_wait_unreserved(bo, true); | ||
94 | spin_lock(&glob->lru_lock); | ||
95 | if (unlikely(ret != 0)) | ||
96 | ttm_eu_backoff_reservation_locked(list); | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | |||
101 | void ttm_eu_backoff_reservation(struct list_head *list) | ||
102 | { | ||
103 | struct ttm_validate_buffer *entry; | ||
104 | struct ttm_bo_global *glob; | ||
105 | |||
106 | if (list_empty(list)) | ||
107 | return; | ||
108 | |||
109 | entry = list_first_entry(list, struct ttm_validate_buffer, head); | ||
110 | glob = entry->bo->glob; | ||
111 | spin_lock(&glob->lru_lock); | ||
112 | ttm_eu_backoff_reservation_locked(list); | ||
113 | spin_unlock(&glob->lru_lock); | ||
114 | } | ||
48 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | 115 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); |
49 | 116 | ||
50 | /* | 117 | /* |
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); | |||
59 | * buffers in different orders. | 126 | * buffers in different orders. |
60 | */ | 127 | */ |
61 | 128 | ||
62 | int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) | 129 | int ttm_eu_reserve_buffers(struct list_head *list) |
63 | { | 130 | { |
131 | struct ttm_bo_global *glob; | ||
64 | struct ttm_validate_buffer *entry; | 132 | struct ttm_validate_buffer *entry; |
65 | int ret; | 133 | int ret; |
134 | uint32_t val_seq; | ||
135 | |||
136 | if (list_empty(list)) | ||
137 | return 0; | ||
138 | |||
139 | list_for_each_entry(entry, list, head) { | ||
140 | entry->reserved = false; | ||
141 | entry->put_count = 0; | ||
142 | entry->removed = false; | ||
143 | } | ||
144 | |||
145 | entry = list_first_entry(list, struct ttm_validate_buffer, head); | ||
146 | glob = entry->bo->glob; | ||
66 | 147 | ||
67 | retry: | 148 | retry: |
149 | spin_lock(&glob->lru_lock); | ||
150 | val_seq = entry->bo->bdev->val_seq++; | ||
151 | |||
68 | list_for_each_entry(entry, list, head) { | 152 | list_for_each_entry(entry, list, head) { |
69 | struct ttm_buffer_object *bo = entry->bo; | 153 | struct ttm_buffer_object *bo = entry->bo; |
70 | 154 | ||
71 | entry->reserved = false; | 155 | retry_this_bo: |
72 | ret = ttm_bo_reserve(bo, true, false, true, val_seq); | 156 | ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq); |
73 | if (ret != 0) { | 157 | switch (ret) { |
74 | ttm_eu_backoff_reservation(list); | 158 | case 0: |
75 | if (ret == -EAGAIN) { | 159 | break; |
76 | ret = ttm_bo_wait_unreserved(bo, true); | 160 | case -EBUSY: |
77 | if (unlikely(ret != 0)) | 161 | ret = ttm_eu_wait_unreserved_locked(list, bo); |
78 | return ret; | 162 | if (unlikely(ret != 0)) { |
79 | goto retry; | 163 | spin_unlock(&glob->lru_lock); |
80 | } else | 164 | ttm_eu_list_ref_sub(list); |
81 | return ret; | 165 | return ret; |
166 | } | ||
167 | goto retry_this_bo; | ||
168 | case -EAGAIN: | ||
169 | ttm_eu_backoff_reservation_locked(list); | ||
170 | spin_unlock(&glob->lru_lock); | ||
171 | ttm_eu_list_ref_sub(list); | ||
172 | ret = ttm_bo_wait_unreserved(bo, true); | ||
173 | if (unlikely(ret != 0)) | ||
174 | return ret; | ||
175 | goto retry; | ||
176 | default: | ||
177 | ttm_eu_backoff_reservation_locked(list); | ||
178 | spin_unlock(&glob->lru_lock); | ||
179 | ttm_eu_list_ref_sub(list); | ||
180 | return ret; | ||
82 | } | 181 | } |
83 | 182 | ||
84 | entry->reserved = true; | 183 | entry->reserved = true; |
85 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { | 184 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { |
86 | ttm_eu_backoff_reservation(list); | 185 | ttm_eu_backoff_reservation_locked(list); |
186 | spin_unlock(&glob->lru_lock); | ||
187 | ttm_eu_list_ref_sub(list); | ||
87 | ret = ttm_bo_wait_cpu(bo, false); | 188 | ret = ttm_bo_wait_cpu(bo, false); |
88 | if (ret) | 189 | if (ret) |
89 | return ret; | 190 | return ret; |
90 | goto retry; | 191 | goto retry; |
91 | } | 192 | } |
92 | } | 193 | } |
194 | |||
195 | ttm_eu_del_from_lru_locked(list); | ||
196 | spin_unlock(&glob->lru_lock); | ||
197 | ttm_eu_list_ref_sub(list); | ||
198 | |||
93 | return 0; | 199 | return 0; |
94 | } | 200 | } |
95 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | 201 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); |
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers); | |||
97 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) | 203 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) |
98 | { | 204 | { |
99 | struct ttm_validate_buffer *entry; | 205 | struct ttm_validate_buffer *entry; |
206 | struct ttm_buffer_object *bo; | ||
207 | struct ttm_bo_global *glob; | ||
208 | struct ttm_bo_device *bdev; | ||
209 | struct ttm_bo_driver *driver; | ||
100 | 210 | ||
101 | list_for_each_entry(entry, list, head) { | 211 | if (list_empty(list)) |
102 | struct ttm_buffer_object *bo = entry->bo; | 212 | return; |
103 | struct ttm_bo_driver *driver = bo->bdev->driver; | 213 | |
104 | void *old_sync_obj; | 214 | bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; |
215 | bdev = bo->bdev; | ||
216 | driver = bdev->driver; | ||
217 | glob = bo->glob; | ||
105 | 218 | ||
106 | spin_lock(&bo->lock); | 219 | spin_lock(&bdev->fence_lock); |
107 | old_sync_obj = bo->sync_obj; | 220 | spin_lock(&glob->lru_lock); |
221 | |||
222 | list_for_each_entry(entry, list, head) { | ||
223 | bo = entry->bo; | ||
224 | entry->old_sync_obj = bo->sync_obj; | ||
108 | bo->sync_obj = driver->sync_obj_ref(sync_obj); | 225 | bo->sync_obj = driver->sync_obj_ref(sync_obj); |
109 | bo->sync_obj_arg = entry->new_sync_obj_arg; | 226 | bo->sync_obj_arg = entry->new_sync_obj_arg; |
110 | spin_unlock(&bo->lock); | 227 | ttm_bo_unreserve_locked(bo); |
111 | ttm_bo_unreserve(bo); | ||
112 | entry->reserved = false; | 228 | entry->reserved = false; |
113 | if (old_sync_obj) | 229 | } |
114 | driver->sync_obj_unref(&old_sync_obj); | 230 | spin_unlock(&glob->lru_lock); |
231 | spin_unlock(&bdev->fence_lock); | ||
232 | |||
233 | list_for_each_entry(entry, list, head) { | ||
234 | if (entry->old_sync_obj) | ||
235 | driver->sync_obj_unref(&entry->old_sync_obj); | ||
115 | } | 236 | } |
116 | } | 237 | } |
117 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); | 238 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e7a58d055041..10fc01f69c40 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -264,7 +264,6 @@ struct vmw_private { | |||
264 | */ | 264 | */ |
265 | 265 | ||
266 | struct vmw_sw_context ctx; | 266 | struct vmw_sw_context ctx; |
267 | uint32_t val_seq; | ||
268 | struct mutex cmdbuf_mutex; | 267 | struct mutex cmdbuf_mutex; |
269 | 268 | ||
270 | /** | 269 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 76954e3528c1..41b95ed6dbcd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
653 | ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); | 653 | ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); |
654 | if (unlikely(ret != 0)) | 654 | if (unlikely(ret != 0)) |
655 | goto out_err; | 655 | goto out_err; |
656 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, | 656 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); |
657 | dev_priv->val_seq++); | ||
658 | if (unlikely(ret != 0)) | 657 | if (unlikely(ret != 0)) |
659 | goto out_err; | 658 | goto out_err; |
660 | 659 | ||
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 628f76772d22..0f14f94ed8f4 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -683,6 +683,21 @@ struct drm_master { | |||
683 | void *driver_priv; /**< Private structure for driver to use */ | 683 | void *driver_priv; /**< Private structure for driver to use */ |
684 | }; | 684 | }; |
685 | 685 | ||
686 | /* Size of ringbuffer for vblank timestamps. Just double-buffer | ||
687 | * in initial implementation. | ||
688 | */ | ||
689 | #define DRM_VBLANKTIME_RBSIZE 2 | ||
690 | |||
691 | /* Flags and return codes for get_vblank_timestamp() driver function. */ | ||
692 | #define DRM_CALLED_FROM_VBLIRQ 1 | ||
693 | #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) | ||
694 | #define DRM_VBLANKTIME_INVBL (1 << 1) | ||
695 | |||
696 | /* get_scanout_position() return flags */ | ||
697 | #define DRM_SCANOUTPOS_VALID (1 << 0) | ||
698 | #define DRM_SCANOUTPOS_INVBL (1 << 1) | ||
699 | #define DRM_SCANOUTPOS_ACCURATE (1 << 2) | ||
700 | |||
686 | /** | 701 | /** |
687 | * DRM driver structure. This structure represents the common code for | 702 | * DRM driver structure. This structure represents the common code for |
688 | * a family of cards. There will be one drm_device for each card present | 703 | * a family of cards. There will be one drm_device for each card present |
@@ -760,6 +775,68 @@ struct drm_driver { | |||
760 | */ | 775 | */ |
761 | int (*device_is_agp) (struct drm_device *dev); | 776 | int (*device_is_agp) (struct drm_device *dev); |
762 | 777 | ||
778 | /** | ||
779 | * Called by vblank timestamping code. | ||
780 | * | ||
781 | * Return the current display scanout position from a crtc. | ||
782 | * | ||
783 | * \param dev DRM device. | ||
784 | * \param crtc Id of the crtc to query. | ||
785 | * \param *vpos Target location for current vertical scanout position. | ||
786 | * \param *hpos Target location for current horizontal scanout position. | ||
787 | * | ||
788 | * Returns vpos as a positive number while in active scanout area. | ||
789 | * Returns vpos as a negative number inside vblank, counting the number | ||
790 | * of scanlines to go until end of vblank, e.g., -1 means "one scanline | ||
791 | * until start of active scanout / end of vblank." | ||
792 | * | ||
793 | * \return Flags, or'ed together as follows: | ||
794 | * | ||
795 | * DRM_SCANOUTPOS_VALID = Query successful. | ||
796 | * DRM_SCANOUTPOS_INVBL = Inside vblank. | ||
797 | * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of | ||
798 | * this flag means that the returned position may be offset by a constant | ||
799 | * but unknown small number of scanlines wrt. real scanout position. | ||
800 | * | ||
801 | */ | ||
802 | int (*get_scanout_position) (struct drm_device *dev, int crtc, | ||
803 | int *vpos, int *hpos); | ||
804 | |||
805 | /** | ||
806 | * Called by \c drm_get_last_vbltimestamp. Should return a precise | ||
807 | * timestamp when the most recent VBLANK interval ended or will end. | ||
808 | * | ||
809 | * Specifically, the timestamp in @vblank_time should correspond as | ||
810 | * closely as possible to the time when the first video scanline of | ||
811 | * the video frame after the end of VBLANK will start scanning out, | ||
812 | * the time immediately after end of the VBLANK interval. If the | ||
813 | * @crtc is currently inside VBLANK, this will be a time in the future. | ||
814 | * If the @crtc is currently scanning out a frame, this will be the | ||
815 | * past start time of the current scanout. This is meant to adhere | ||
816 | * to the OpenML OML_sync_control extension specification. | ||
817 | * | ||
818 | * \param dev DRM device handle. | ||
819 | * \param crtc crtc for which timestamp should be returned. | ||
820 | * \param *max_error Maximum allowable timestamp error in nanoseconds. | ||
821 | * Implementation should strive to provide a timestamp | ||
822 | * with an error of at most *max_error nanoseconds. | ||
823 | * Returns the true upper bound on the timestamp error. | ||
824 | * \param *vblank_time Target location for returned vblank timestamp. | ||
825 | * \param flags 0 = Defaults, no special treatment needed. | ||
826 | * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank | ||
827 | * irq handler. Some drivers need to apply some workarounds | ||
828 | * for gpu-specific vblank irq quirks if flag is set. | ||
829 | * | ||
830 | * \returns | ||
831 | * Zero if timestamping isn't supported in current display mode or a | ||
832 | * negative number on failure. A positive status code on success, | ||
833 | * which describes how the vblank_time timestamp was computed. | ||
834 | */ | ||
835 | int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, | ||
836 | int *max_error, | ||
837 | struct timeval *vblank_time, | ||
838 | unsigned flags); | ||
839 | |||
763 | /* these have to be filled in */ | 840 | /* these have to be filled in */ |
764 | 841 | ||
765 | irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); | 842 | irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); |
@@ -983,6 +1060,8 @@ struct drm_device { | |||
983 | 1060 | ||
984 | wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ | 1061 | wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ |
985 | atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ | 1062 | atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ |
1063 | struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ | ||
1064 | spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ | ||
986 | spinlock_t vbl_lock; | 1065 | spinlock_t vbl_lock; |
987 | atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ | 1066 | atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ |
988 | u32 *last_vblank; /* protected by dev->vbl_lock, used */ | 1067 | u32 *last_vblank; /* protected by dev->vbl_lock, used */ |
@@ -1282,11 +1361,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data, | |||
1282 | struct drm_file *filp); | 1361 | struct drm_file *filp); |
1283 | extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); | 1362 | extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); |
1284 | extern u32 drm_vblank_count(struct drm_device *dev, int crtc); | 1363 | extern u32 drm_vblank_count(struct drm_device *dev, int crtc); |
1364 | extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, | ||
1365 | struct timeval *vblanktime); | ||
1285 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); | 1366 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); |
1286 | extern int drm_vblank_get(struct drm_device *dev, int crtc); | 1367 | extern int drm_vblank_get(struct drm_device *dev, int crtc); |
1287 | extern void drm_vblank_put(struct drm_device *dev, int crtc); | 1368 | extern void drm_vblank_put(struct drm_device *dev, int crtc); |
1288 | extern void drm_vblank_off(struct drm_device *dev, int crtc); | 1369 | extern void drm_vblank_off(struct drm_device *dev, int crtc); |
1289 | extern void drm_vblank_cleanup(struct drm_device *dev); | 1370 | extern void drm_vblank_cleanup(struct drm_device *dev); |
1371 | extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, | ||
1372 | struct timeval *tvblank, unsigned flags); | ||
1373 | extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, | ||
1374 | int crtc, int *max_error, | ||
1375 | struct timeval *vblank_time, | ||
1376 | unsigned flags, | ||
1377 | struct drm_crtc *refcrtc); | ||
1378 | extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); | ||
1379 | |||
1290 | /* Modesetting support */ | 1380 | /* Modesetting support */ |
1291 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); | 1381 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
1292 | extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); | 1382 | extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); |
@@ -1337,6 +1427,9 @@ extern void drm_put_dev(struct drm_device *dev); | |||
1337 | extern int drm_put_minor(struct drm_minor **minor); | 1427 | extern int drm_put_minor(struct drm_minor **minor); |
1338 | extern unsigned int drm_debug; | 1428 | extern unsigned int drm_debug; |
1339 | 1429 | ||
1430 | extern unsigned int drm_vblank_offdelay; | ||
1431 | extern unsigned int drm_timestamp_precision; | ||
1432 | |||
1340 | extern struct class *drm_class; | 1433 | extern struct class *drm_class; |
1341 | extern struct proc_dir_entry *drm_proc_root; | 1434 | extern struct proc_dir_entry *drm_proc_root; |
1342 | extern struct dentry *drm_debugfs_root; | 1435 | extern struct dentry *drm_debugfs_root; |
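
Editor's note: a driver opting into precise vblank timestamps fills in both new hooks; typically get_vblank_timestamp just forwards to the drm_calc_vbltimestamp_from_scanoutpos helper declared above, which converts the scanout position reported by get_scanout_position into a corrected timestamp. A hedged sketch (the mydrv_* names are hypothetical; the signatures are taken from this header):

    static int mydrv_get_vblank_timestamp(struct drm_device *dev, int crtc,
                                          int *max_error,
                                          struct timeval *vblank_time,
                                          unsigned flags)
    {
            /* Hypothetical lookup of the drm_crtc backing this crtc index. */
            struct drm_crtc *refcrtc = mydrv_crtc_from_index(dev, crtc);

            /* Delegate to the core helper, which derives the timestamp
             * from driver->get_scanout_position and the crtc constants. */
            return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
                                                         vblank_time, flags,
                                                         refcrtc);
    }
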
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 029aa688e787..acd7fade160d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -351,8 +351,14 @@ struct drm_crtc { | |||
351 | 351 | ||
352 | bool enabled; | 352 | bool enabled; |
353 | 353 | ||
354 | /* Requested mode from modesetting. */ | ||
354 | struct drm_display_mode mode; | 355 | struct drm_display_mode mode; |
355 | 356 | ||
357 | /* Programmed mode in hw, after adjustments for encoders, | ||
358 | * crtc, panel scaling etc. Needed for timestamping etc. | ||
359 | */ | ||
360 | struct drm_display_mode hwmode; | ||
361 | |||
356 | int x, y; | 362 | int x, y; |
357 | const struct drm_crtc_funcs *funcs; | 363 | const struct drm_crtc_funcs *funcs; |
358 | 364 | ||
@@ -360,6 +366,9 @@ struct drm_crtc { | |||
360 | uint32_t gamma_size; | 366 | uint32_t gamma_size; |
361 | uint16_t *gamma_store; | 367 | uint16_t *gamma_store; |
362 | 368 | ||
369 | /* Constants needed for precise vblank and swap timestamping. */ | ||
370 | s64 framedur_ns, linedur_ns, pixeldur_ns; | ||
371 | |||
363 | /* if you are using the helper */ | 372 | /* if you are using the helper */ |
364 | void *helper_private; | 373 | void *helper_private; |
365 | }; | 374 | }; |
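For reference, the three cached durations are plain conversions of the programmed mode: hwmode.clock is the dot clock in kHz, so a pixel lasts 10^9 / (clock * 1000) ns, a line lasts crtc_htotal pixels, and a frame lasts crtc_vtotal lines. A simplified sketch of the arithmetic drm_calc_timestamping_constants() caches — the real helper, in drm_irq.c, additionally guards against a zero dot clock:

	/* Sketch of the cached timestamping constants (simplified). */
	u64 dotclock = (u64) crtc->hwmode.clock * 1000;	/* kHz -> Hz */

	crtc->pixeldur_ns = div64_u64(1000000000ULL, dotclock);
	crtc->linedur_ns  = div64_u64((u64) crtc->hwmode.crtc_htotal *
				      1000000000ULL, dotclock);
	crtc->framedur_ns = crtc->linedur_ns * crtc->hwmode.crtc_vtotal;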
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 883c1d439899..e6b28a39942f 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -419,6 +419,10 @@ | |||
419 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 419 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
420 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 420 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
421 | {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 421 | {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
422 | {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
423 | {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
424 | {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
425 | {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
422 | {0, 0, 0} | 426 | {0, 0, 0} |
423 | 427 | ||
424 | #define r128_PCI_IDS \ | 428 | #define r128_PCI_IDS \ |
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index bc5590b1a1ac..e2cfe80f6fca 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h | |||
@@ -71,16 +71,14 @@ struct drm_nouveau_gpuobj_free { | |||
71 | #define NOUVEAU_GETPARAM_PCI_VENDOR 3 | 71 | #define NOUVEAU_GETPARAM_PCI_VENDOR 3 |
72 | #define NOUVEAU_GETPARAM_PCI_DEVICE 4 | 72 | #define NOUVEAU_GETPARAM_PCI_DEVICE 4 |
73 | #define NOUVEAU_GETPARAM_BUS_TYPE 5 | 73 | #define NOUVEAU_GETPARAM_BUS_TYPE 5 |
74 | #define NOUVEAU_GETPARAM_FB_PHYSICAL 6 | ||
75 | #define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 | ||
76 | #define NOUVEAU_GETPARAM_FB_SIZE 8 | 74 | #define NOUVEAU_GETPARAM_FB_SIZE 8 |
77 | #define NOUVEAU_GETPARAM_AGP_SIZE 9 | 75 | #define NOUVEAU_GETPARAM_AGP_SIZE 9 |
78 | #define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 | ||
79 | #define NOUVEAU_GETPARAM_CHIPSET_ID 11 | 76 | #define NOUVEAU_GETPARAM_CHIPSET_ID 11 |
80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 | 77 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 |
81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 | 78 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 |
82 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 | 79 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 |
83 | #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 | 80 | #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 |
81 | #define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 | ||
84 | struct drm_nouveau_getparam { | 82 | struct drm_nouveau_getparam { |
85 | uint64_t param; | 83 | uint64_t param; |
86 | uint64_t value; | 84 | uint64_t value; |
@@ -171,7 +169,6 @@ struct drm_nouveau_gem_pushbuf { | |||
171 | }; | 169 | }; |
172 | 170 | ||
173 | #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 | 171 | #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 |
174 | #define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 | ||
175 | #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 | 172 | #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 |
176 | struct drm_nouveau_gem_cpu_prep { | 173 | struct drm_nouveau_gem_cpu_prep { |
177 | uint32_t handle; | 174 | uint32_t handle; |
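The new NOUVEAU_GETPARAM_HAS_PAGEFLIP parameter lets userspace probe for real page-flip support before depending on it. A hedged userspace sketch using libdrm's generic command wrapper; older kernels that lack the parameter simply fail the ioctl, which the sketch treats as "no":

#include <xf86drm.h>
#include <drm/nouveau_drm.h>

static int nouveau_has_pageflip(int fd)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_HAS_PAGEFLIP,
	};

	/* Treat any failure (including EINVAL from older kernels) as "no". */
	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)))
		return 0;

	return gp.value != 0;
}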
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index beafc156a535..50852aad260a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -74,6 +74,8 @@ struct ttm_placement { | |||
74 | * @is_iomem: is this io memory? | 74 | * @is_iomem: is this io memory? |
75 | * @size: size in byte | 75 | * @size: size in byte |
76 | * @offset: offset from the base address | 76 | * @offset: offset from the base address |
77 | * @io_reserved_vm: The VM system has a refcount in @io_reserved_count | ||
78 | * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve | ||
77 | * | 79 | * |
78 | * Structure indicating the bus placement of an object. | 80 | * Structure indicating the bus placement of an object. |
79 | */ | 81 | */ |
@@ -83,7 +85,8 @@ struct ttm_bus_placement { | |||
83 | unsigned long size; | 85 | unsigned long size; |
84 | unsigned long offset; | 86 | unsigned long offset; |
85 | bool is_iomem; | 87 | bool is_iomem; |
86 | bool io_reserved; | 88 | bool io_reserved_vm; |
89 | uint64_t io_reserved_count; | ||
87 | }; | 90 | }; |
88 | 91 | ||
89 | 92 | ||
@@ -154,7 +157,6 @@ struct ttm_tt; | |||
154 | * keeps one refcount. When this refcount reaches zero, | 157 | * keeps one refcount. When this refcount reaches zero, |
155 | * the object is destroyed. | 158 | * the object is destroyed. |
156 | * @event_queue: Queue for processes waiting on buffer object status change. | 159 | * @event_queue: Queue for processes waiting on buffer object status change. |
157 | * @lock: spinlock protecting mostly synchronization members. | ||
158 | * @mem: structure describing current placement. | 160 | * @mem: structure describing current placement. |
159 | * @persistant_swap_storage: Usually the swap storage is deleted for buffers | 161 | * @persistant_swap_storage: Usually the swap storage is deleted for buffers |
160 | * pinned in physical memory. If this behaviour is not desired, this member | 162 | * pinned in physical memory. If this behaviour is not desired, this member |
@@ -213,7 +215,6 @@ struct ttm_buffer_object { | |||
213 | struct kref kref; | 215 | struct kref kref; |
214 | struct kref list_kref; | 216 | struct kref list_kref; |
215 | wait_queue_head_t event_queue; | 217 | wait_queue_head_t event_queue; |
216 | spinlock_t lock; | ||
217 | 218 | ||
218 | /** | 219 | /** |
219 | * Members protected by the bo::reserved lock. | 220 | * Members protected by the bo::reserved lock. |
@@ -237,6 +238,7 @@ struct ttm_buffer_object { | |||
237 | struct list_head lru; | 238 | struct list_head lru; |
238 | struct list_head ddestroy; | 239 | struct list_head ddestroy; |
239 | struct list_head swap; | 240 | struct list_head swap; |
241 | struct list_head io_reserve_lru; | ||
240 | uint32_t val_seq; | 242 | uint32_t val_seq; |
241 | bool seq_valid; | 243 | bool seq_valid; |
242 | 244 | ||
@@ -248,10 +250,10 @@ struct ttm_buffer_object { | |||
248 | atomic_t reserved; | 250 | atomic_t reserved; |
249 | 251 | ||
250 | /** | 252 | /** |
251 | * Members protected by the bo::lock | 253 | * Members protected by struct buffer_object_device::fence_lock |
252 | * In addition, setting sync_obj to anything else | 254 | * In addition, setting sync_obj to anything else |
253 | * than NULL requires bo::reserved to be held. This allows for | 255 | * than NULL requires bo::reserved to be held. This allows for |
254 | * checking NULL while reserved but not holding bo::lock. | 256 | * checking NULL while reserved but not holding the mentioned lock. |
255 | */ | 257 | */ |
256 | 258 | ||
257 | void *sync_obj_arg; | 259 | void *sync_obj_arg; |
@@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, | |||
364 | */ | 366 | */ |
365 | extern void ttm_bo_unref(struct ttm_buffer_object **bo); | 367 | extern void ttm_bo_unref(struct ttm_buffer_object **bo); |
366 | 368 | ||
369 | |||
370 | /** | ||
371 | * ttm_bo_list_ref_sub | ||
372 | * | ||
373 | * @bo: The buffer object. | ||
374 | * @count: The number of references with which to decrease @bo::list_kref. | ||
375 | * @never_free: The refcount should not reach zero with this operation. | ||
376 | * | ||
377 | * Release @count lru list references to this buffer object. | ||
378 | */ | ||
379 | extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, | ||
380 | bool never_free); | ||
381 | |||
382 | /** | ||
383 | * ttm_bo_add_to_lru | ||
384 | * | ||
385 | * @bo: The buffer object. | ||
386 | * | ||
387 | * Add this bo to the relevant mem type lru and, if it's backed by | ||
388 | * system pages (ttms), to the swap list. | ||
389 | * This function must be called with struct ttm_bo_global::lru_lock held, and | ||
390 | * is typically called immediately prior to unreserving a bo. | ||
391 | */ | ||
392 | extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); | ||
393 | |||
394 | /** | ||
395 | * ttm_bo_del_from_lru | ||
396 | * | ||
397 | * @bo: The buffer object. | ||
398 | * | ||
399 | * Remove this bo from all lru lists used to lookup and reserve an object. | ||
400 | * This function must be called with struct ttm_bo_global::lru_lock held, | ||
401 | * and is usually called immediately after the bo has been reserved to | ||
402 | * avoid recursive reservation from lru lists. | ||
403 | */ | ||
404 | extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); | ||
405 | |||
406 | |||
367 | /** | 407 | /** |
368 | * ttm_bo_lock_delayed_workqueue | 408 | * ttm_bo_lock_delayed_workqueue |
369 | * | 409 | * |
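The newly exported lru helpers exist for callers that take reservations while already holding the global lru lock and must therefore maintain the lists by hand. Putting the three comments above together, the intended pattern looks roughly like this — a sketch assuming bo->glob names the buffer's ttm_bo_global, and using ttm_bo_reserve_locked()/ttm_bo_unreserve_locked() declared in ttm_bo_driver.h below:

	struct ttm_bo_global *glob = bo->glob;
	int put_count, ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, true, false, false, 0);
	if (likely(ret == 0)) {
		/* Documented: remove from lru right after reserving. */
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);

		/* Drop the lru references we just took over. */
		ttm_bo_list_ref_sub(bo, put_count, true);

		/* ... validate / map / use the buffer ... */

		spin_lock(&glob->lru_lock);
		/* Documented: re-add to lru just before unreserving. */
		ttm_bo_add_to_lru(bo);
		ttm_bo_unreserve_locked(bo);
	}
	spin_unlock(&glob->lru_lock);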
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 8e0c848326b6..1da8af6ac884 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -179,30 +179,6 @@ struct ttm_tt { | |||
179 | #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ | 179 | #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ |
180 | #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ | 180 | #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ |
181 | 181 | ||
182 | /** | ||
183 | * struct ttm_mem_type_manager | ||
184 | * | ||
185 | * @has_type: The memory type has been initialized. | ||
186 | * @use_type: The memory type is enabled. | ||
187 | * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory | ||
188 | * managed by this memory type. | ||
189 | * @gpu_offset: If used, the GPU offset of the first managed page of | ||
190 | * fixed memory or the first managed location in an aperture. | ||
191 | * @size: Size of the managed region. | ||
192 | * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, | ||
193 | * as defined in ttm_placement_common.h | ||
194 | * @default_caching: The default caching policy used for a buffer object | ||
195 | * placed in this memory type if the user doesn't provide one. | ||
196 | * @manager: The range manager used for this memory type. FIXME: If the aperture | ||
197 | * has a page size different from the underlying system, the granularity | ||
198 | * of this manager should take care of this. But the range allocating code | ||
199 | * in ttm_bo.c needs to be modified for this. | ||
200 | * @lru: The lru list for this memory type. | ||
201 | * | ||
202 | * This structure is used to identify and manage memory types for a device. | ||
203 | * It's set up by the ttm_bo_driver::init_mem_type method. | ||
204 | */ | ||
205 | |||
206 | struct ttm_mem_type_manager; | 182 | struct ttm_mem_type_manager; |
207 | 183 | ||
208 | struct ttm_mem_type_manager_func { | 184 | struct ttm_mem_type_manager_func { |
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func { | |||
287 | void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); | 263 | void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); |
288 | }; | 264 | }; |
289 | 265 | ||
266 | /** | ||
267 | * struct ttm_mem_type_manager | ||
268 | * | ||
269 | * @has_type: The memory type has been initialized. | ||
270 | * @use_type: The memory type is enabled. | ||
271 | * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory | ||
272 | * managed by this memory type. | ||
273 | * @gpu_offset: If used, the GPU offset of the first managed page of | ||
274 | * fixed memory or the first managed location in an aperture. | ||
275 | * @size: Size of the managed region. | ||
276 | * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, | ||
277 | * as defined in ttm_placement_common.h | ||
278 | * @default_caching: The default caching policy used for a buffer object | ||
279 | * placed in this memory type if the user doesn't provide one. | ||
280 | * @func: structure pointer implementing the range manager. See above | ||
281 | * @priv: Driver private closure for @func. | ||
282 | * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures | ||
283 | * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions | ||
284 | * reserved by the TTM vm system. | ||
285 | * @io_reserve_lru: Optional lru list for unreserving io mem regions. | ||
286 | * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain | ||
287 | * static information. bdev::driver::io_mem_free is never used. | ||
288 | * @lru: The lru list for this memory type. | ||
289 | * | ||
290 | * This structure is used to identify and manage memory types for a device. | ||
291 | * It's set up by the ttm_bo_driver::init_mem_type method. | ||
292 | */ | ||
293 | |||
294 | |||
295 | |||
290 | struct ttm_mem_type_manager { | 296 | struct ttm_mem_type_manager { |
291 | struct ttm_bo_device *bdev; | 297 | struct ttm_bo_device *bdev; |
292 | 298 | ||
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager { | |||
303 | uint32_t default_caching; | 309 | uint32_t default_caching; |
304 | const struct ttm_mem_type_manager_func *func; | 310 | const struct ttm_mem_type_manager_func *func; |
305 | void *priv; | 311 | void *priv; |
312 | struct mutex io_reserve_mutex; | ||
313 | bool use_io_reserve_lru; | ||
314 | bool io_reserve_fastpath; | ||
315 | |||
316 | /* | ||
317 | * Protected by @io_reserve_mutex: | ||
318 | */ | ||
319 | |||
320 | struct list_head io_reserve_lru; | ||
306 | 321 | ||
307 | /* | 322 | /* |
308 | * Protected by the global->lru_lock. | 323 | * Protected by the global->lru_lock. |
@@ -510,9 +525,12 @@ struct ttm_bo_global { | |||
510 | * | 525 | * |
511 | * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. | 526 | * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. |
512 | * @man: An array of mem_type_managers. | 527 | * @man: An array of mem_type_managers. |
528 | * @fence_lock: Protects the synchronizing members on *all* bos belonging | ||
529 | * to this device. | ||
513 | * @addr_space_mm: Range manager for the device address space. | 530 | * @addr_space_mm: Range manager for the device address space. |
514 | * @lru_lock: Spinlock that protects the buffer+device lru lists and | 531 | * @lru_lock: Spinlock that protects the buffer+device lru lists and |
515 | * ddestroy lists. | 532 | * ddestroy lists. |
533 | * @val_seq: Current validation sequence. | ||
516 | * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. | 534 | * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. |
517 | * If a GPU lockup has been detected, this is forced to 0. | 535 | * If a GPU lockup has been detected, this is forced to 0. |
518 | * @dev_mapping: A pointer to the struct address_space representing the | 536 | * @dev_mapping: A pointer to the struct address_space representing the |
@@ -531,6 +549,7 @@ struct ttm_bo_device { | |||
531 | struct ttm_bo_driver *driver; | 549 | struct ttm_bo_driver *driver; |
532 | rwlock_t vm_lock; | 550 | rwlock_t vm_lock; |
533 | struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; | 551 | struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
552 | spinlock_t fence_lock; | ||
534 | /* | 553 | /* |
535 | * Protected by the vm lock. | 554 | * Protected by the vm lock. |
536 | */ | 555 | */ |
@@ -541,6 +560,7 @@ struct ttm_bo_device { | |||
541 | * Protected by the global:lru lock. | 560 | * Protected by the global:lru lock. |
542 | */ | 561 | */ |
543 | struct list_head ddestroy; | 562 | struct list_head ddestroy; |
563 | uint32_t val_seq; | ||
544 | 564 | ||
545 | /* | 565 | /* |
546 | * Protected by load / firstopen / lastclose /unload sync. | 566 | * Protected by load / firstopen / lastclose /unload sync. |
@@ -753,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, | |||
753 | 773 | ||
754 | extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); | 774 | extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); |
755 | 775 | ||
756 | /** | ||
757 | * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory. | ||
758 | * | ||
759 | * @bo Pointer to a struct ttm_buffer_object. | ||
760 | * @bus_base On return the base of the PCI region | ||
761 | * @bus_offset On return the byte offset into the PCI region | ||
762 | * @bus_size On return the byte size of the buffer object or zero if | ||
763 | * the buffer object memory is not accessible through a PCI region. | ||
764 | * | ||
765 | * Returns: | ||
766 | * -EINVAL if the buffer object is currently not mappable. | ||
767 | * 0 otherwise. | ||
768 | */ | ||
769 | |||
770 | extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, | ||
771 | struct ttm_mem_reg *mem, | ||
772 | unsigned long *bus_base, | ||
773 | unsigned long *bus_offset, | ||
774 | unsigned long *bus_size); | ||
775 | |||
776 | extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev, | ||
777 | struct ttm_mem_reg *mem); | ||
778 | extern void ttm_mem_io_free(struct ttm_bo_device *bdev, | ||
779 | struct ttm_mem_reg *mem); | ||
780 | |||
781 | extern void ttm_bo_global_release(struct drm_global_reference *ref); | 776 | extern void ttm_bo_global_release(struct drm_global_reference *ref); |
782 | extern int ttm_bo_global_init(struct drm_global_reference *ref); | 777 | extern int ttm_bo_global_init(struct drm_global_reference *ref); |
783 | 778 | ||
@@ -810,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
810 | extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | 805 | extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); |
811 | 806 | ||
812 | /** | 807 | /** |
808 | * ttm_bo_unmap_virtual | ||
809 | * | ||
810 | * @bo: tear down the virtual mappings for this BO | ||
811 | * | ||
812 | * The caller must take ttm_mem_io_lock before calling this function. | ||
813 | */ | ||
814 | extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); | ||
815 | |||
816 | extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); | ||
817 | extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); | ||
818 | extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, | ||
819 | bool interruptible); | ||
820 | extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); | ||
821 | |||
822 | |||
823 | /** | ||
813 | * ttm_bo_reserve: | 824 | * ttm_bo_reserve: |
814 | * | 825 | * |
815 | * @bo: A pointer to a struct ttm_buffer_object. | 826 | * @bo: A pointer to a struct ttm_buffer_object. |
@@ -859,11 +870,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | |||
859 | * try again. (only if use_sequence == 1). | 870 | * try again. (only if use_sequence == 1). |
860 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | 871 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
861 | * a signal. Release all buffer reservations and return to user-space. | 872 | * a signal. Release all buffer reservations and return to user-space. |
873 | * -EBUSY: The function needed to sleep, but @no_wait was true. | ||
874 | * -EDEADLK: Bo already reserved using @sequence. This error code will only | ||
875 | * be returned if @use_sequence is set to true. | ||
862 | */ | 876 | */ |
863 | extern int ttm_bo_reserve(struct ttm_buffer_object *bo, | 877 | extern int ttm_bo_reserve(struct ttm_buffer_object *bo, |
864 | bool interruptible, | 878 | bool interruptible, |
865 | bool no_wait, bool use_sequence, uint32_t sequence); | 879 | bool no_wait, bool use_sequence, uint32_t sequence); |
866 | 880 | ||
881 | |||
882 | /** | ||
883 | * ttm_bo_reserve_locked: | ||
884 | * | ||
885 | * @bo: A pointer to a struct ttm_buffer_object. | ||
886 | * @interruptible: Sleep interruptible if waiting. | ||
887 | * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. | ||
888 | * @use_sequence: If @bo is already reserved, only sleep waiting for | ||
889 | * it to become unreserved if @sequence < (@bo)->sequence. | ||
890 | * | ||
891 | * Must be called with struct ttm_bo_global::lru_lock held, | ||
892 | * and will not remove reserved buffers from the lru lists. | ||
893 | * The function may release the LRU spinlock if it needs to sleep. | ||
894 | * Otherwise identical to ttm_bo_reserve. | ||
895 | * | ||
896 | * Returns: | ||
897 | * -EAGAIN: The reservation may cause a deadlock. | ||
898 | * Release all buffer reservations, wait for @bo to become unreserved and | ||
899 | * try again. (only if use_sequence == 1). | ||
900 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | ||
901 | * a signal. Release all buffer reservations and return to user-space. | ||
902 | * -EBUSY: The function needed to sleep, but @no_wait was true. | ||
903 | * -EDEADLK: Bo already reserved using @sequence. This error code will only | ||
904 | * be returned if @use_sequence is set to true. | ||
905 | */ | ||
906 | extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | ||
907 | bool interruptible, | ||
908 | bool no_wait, bool use_sequence, | ||
909 | uint32_t sequence); | ||
910 | |||
867 | /** | 911 | /** |
868 | * ttm_bo_unreserve | 912 | * ttm_bo_unreserve |
869 | * | 913 | * |
@@ -874,6 +918,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, | |||
874 | extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); | 918 | extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); |
875 | 919 | ||
876 | /** | 920 | /** |
921 | * ttm_bo_unreserve_locked | ||
922 | * | ||
923 | * @bo: A pointer to a struct ttm_buffer_object. | ||
924 | * | ||
925 | * Unreserve a previous reservation of @bo. | ||
926 | * Needs to be called with struct ttm_bo_global::lru_lock held. | ||
927 | */ | ||
928 | extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); | ||
929 | |||
930 | /** | ||
877 | * ttm_bo_wait_unreserved | 931 | * ttm_bo_wait_unreserved |
878 | * | 932 | * |
879 | * @bo: A pointer to a struct ttm_buffer_object. | 933 | * @bo: A pointer to a struct ttm_buffer_object. |
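The ttm_mem_io_* additions and the locked unmap variant pair up: a caller tearing down CPU mappings by hand must hold the manager's io_reserve lock, as the ttm_bo_unmap_virtual_locked comment requires. A sketch of that pairing — a plausible reconstruction rather than code from this patch, with man derived from the bo's current placement:

	struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
	int ret;

	ret = ttm_mem_io_lock(man, true);	/* interruptible */
	if (unlikely(ret))
		return ret;

	ttm_bo_unmap_virtual_locked(bo);	/* kill CPU-side mappings */
	ttm_mem_io_free_vm(bo);			/* drop the VM's io reservation */

	ttm_mem_io_unlock(man);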
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index cd2c475da9ea..26cc7f9ffa41 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h | |||
@@ -41,7 +41,10 @@ | |||
41 | * @bo: refcounted buffer object pointer. | 41 | * @bo: refcounted buffer object pointer. |
42 | * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once | 42 | * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once |
43 | * adding a new sync object. | 43 | * adding a new sync object. |
44 | * @reservied: Indicates whether @bo has been reserved for validation. | 44 | * @reserved: Indicates whether @bo has been reserved for validation. |
45 | * @removed: Indicates whether @bo has been removed from lru lists. | ||
46 | * @put_count: Number of outstanding references on bo::list_kref. | ||
47 | * @old_sync_obj: Pointer to a sync object about to be unreferenced | ||
45 | */ | 48 | */ |
46 | 49 | ||
47 | struct ttm_validate_buffer { | 50 | struct ttm_validate_buffer { |
@@ -49,6 +52,9 @@ struct ttm_validate_buffer { | |||
49 | struct ttm_buffer_object *bo; | 52 | struct ttm_buffer_object *bo; |
50 | void *new_sync_obj_arg; | 53 | void *new_sync_obj_arg; |
51 | bool reserved; | 54 | bool reserved; |
55 | bool removed; | ||
56 | int put_count; | ||
57 | void *old_sync_obj; | ||
52 | }; | 58 | }; |
53 | 59 | ||
54 | /** | 60 | /** |
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); | |||
66 | * function ttm_eu_reserve_buffers | 72 | * function ttm_eu_reserve_buffers |
67 | * | 73 | * |
68 | * @list: thread private list of ttm_validate_buffer structs. | 74 | * @list: thread private list of ttm_validate_buffer structs. |
69 | * @val_seq: A unique sequence number. | ||
70 | * | 75 | * |
71 | * Tries to reserve bos pointed to by the list entries for validation. | 76 | * Tries to reserve bos pointed to by the list entries for validation. |
72 | * If the function returns 0, all buffers are marked as "unfenced", | 77 | * If the function returns 0, all buffers are marked as "unfenced", |
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); | |||
88 | * has failed. | 93 | * has failed. |
89 | */ | 94 | */ |
90 | 95 | ||
91 | extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); | 96 | extern int ttm_eu_reserve_buffers(struct list_head *list); |
92 | 97 | ||
93 | /** | 98 | /** |
94 | * function ttm_eu_fence_buffer_objects. | 99 | * function ttm_eu_fence_buffer_objects. |
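With the validation sequence number now kept in the device (bdev->val_seq in the ttm_bo_device hunk above), execbuf-style callers no longer supply one. A sketch of the simplified flow; the fence object is driver-specific and stands in as a placeholder here:

	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	/* ... add a ttm_validate_buffer entry for every bo in the job ... */

	ret = ttm_eu_reserve_buffers(&list);	/* val_seq argument is gone */
	if (unlikely(ret))
		return ret;

	/* ... validate placements, build and submit the command stream ... */

	ttm_eu_fence_buffer_objects(&list, driver_fence);	/* placeholder fence */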
diff --git a/include/linux/kref.h b/include/linux/kref.h index 6cc38fc07ab7..d4a62ab2ee5e 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h | |||
@@ -24,5 +24,7 @@ struct kref { | |||
24 | void kref_init(struct kref *kref); | 24 | void kref_init(struct kref *kref); |
25 | void kref_get(struct kref *kref); | 25 | void kref_get(struct kref *kref); |
26 | int kref_put(struct kref *kref, void (*release) (struct kref *kref)); | 26 | int kref_put(struct kref *kref, void (*release) (struct kref *kref)); |
27 | int kref_sub(struct kref *kref, unsigned int count, | ||
28 | void (*release) (struct kref *kref)); | ||
27 | 29 | ||
28 | #endif /* _KREF_H_ */ | 30 | #endif /* _KREF_H_ */ |
diff --git a/lib/kref.c b/lib/kref.c index d3d227a08a4b..3efb882b11db 100644 --- a/lib/kref.c +++ b/lib/kref.c | |||
@@ -62,6 +62,36 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref)) | |||
62 | return 0; | 62 | return 0; |
63 | } | 63 | } |
64 | 64 | ||
65 | |||
66 | /** | ||
67 | * kref_sub - subtract a number of refcounts for object. | ||
68 | * @kref: object. | ||
69 | * @count: Number of refcounts to subtract. | ||
70 | * @release: pointer to the function that will clean up the object when the | ||
71 | * last reference to the object is released. | ||
72 | * This pointer is required, and it is not acceptable to pass kfree | ||
73 | * in as this function. | ||
74 | * | ||
75 | * Subtract @count from the refcount, and if 0, call release(). | ||
76 | * Return 1 if the object was removed, otherwise return 0. Beware, if this | ||
77 | * function returns 0, you still can not count on the kref from remaining in | ||
78 | * memory. Only use the return value if you want to see if the kref is now | ||
79 | * gone, not present. | ||
80 | */ | ||
81 | int kref_sub(struct kref *kref, unsigned int count, | ||
82 | void (*release)(struct kref *kref)) | ||
83 | { | ||
84 | WARN_ON(release == NULL); | ||
85 | WARN_ON(release == (void (*)(struct kref *))kfree); | ||
86 | |||
87 | if (atomic_sub_and_test((int) count, &kref->refcount)) { | ||
88 | release(kref); | ||
89 | return 1; | ||
90 | } | ||
91 | return 0; | ||
92 | } | ||
93 | |||
65 | EXPORT_SYMBOL(kref_init); | 94 | EXPORT_SYMBOL(kref_init); |
66 | EXPORT_SYMBOL(kref_get); | 95 | EXPORT_SYMBOL(kref_get); |
67 | EXPORT_SYMBOL(kref_put); | 96 | EXPORT_SYMBOL(kref_put); |
97 | EXPORT_SYMBOL(kref_sub); | ||
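kref_sub() lets a holder of several references — such as the lru list references that ttm_bo_list_ref_sub() manages above — drop them in a single atomic operation instead of looping over kref_put(). A minimal usage sketch with a hypothetical object type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	kfree(f);	/* calling kfree here is fine; passing kfree itself is not */
}

static void foo_put_many(struct foo *f, unsigned int count)
{
	/* Drops @count references at once; foo_release() runs at zero. */
	kref_sub(&f->kref, count, foo_release);
}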