Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
 drivers/gpu/drm/i915/i915_irq.c | 299 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 292 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..ba1d8314c1ce 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -269,6 +269,59 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_sysfs_hotplug_event(dev);
 }
 
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 busy_up, busy_down, max_avg, min_avg;
+	u16 rgvswctl;
+	u8 new_delay = dev_priv->cur_delay;
+
+	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+	busy_up = I915_READ(RCPREVBSYTUPAVG);
+	busy_down = I915_READ(RCPREVBSYTDNAVG);
+	max_avg = I915_READ(RCBMAXAVG);
+	min_avg = I915_READ(RCBMINAVG);
+
+	/* Handle RCS change request from hw */
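+	/* Delay values are inverted: a smaller delay selects a higher
+	 * frequency, so max_delay is the numeric floor and min_delay the ceiling. */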
+	if (busy_up > max_avg) {
+		if (dev_priv->cur_delay != dev_priv->max_delay)
+			new_delay = dev_priv->cur_delay - 1;
+		if (new_delay < dev_priv->max_delay)
+			new_delay = dev_priv->max_delay;
+	} else if (busy_down < min_avg) {
+		if (dev_priv->cur_delay != dev_priv->min_delay)
+			new_delay = dev_priv->cur_delay + 1;
+		if (new_delay > dev_priv->min_delay)
+			new_delay = dev_priv->min_delay;
+	}
+
+	DRM_DEBUG("rps change requested: %d -> %d\n",
+		  dev_priv->cur_delay, new_delay);
+
+	rgvswctl = I915_READ(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_ERROR("gpu busy, RCS change rejected\n");
+		return; /* still busy with another command */
+	}
+
+	/* Program the new state */
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	POSTING_READ(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+
+	dev_priv->cur_delay = new_delay;
+
+	DRM_DEBUG("rps changed\n");
+
+	return;
+}
+
 irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +384,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 	}
 
+	if (de_iir & DE_PCU_EVENT) {
+		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+		i915_handle_rps_change(dev);
+	}
+
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +434,127 @@ static void i915_error_work_func(struct work_struct *work)
 	}
 }
 
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+			 struct drm_gem_object *src)
+{
+	struct drm_i915_error_object *dst;
+	struct drm_i915_gem_object *src_priv;
+	int page, page_count;
+
+	if (src == NULL)
+		return NULL;
+
+	src_priv = src->driver_private;
+	if (src_priv->pages == NULL)
+		return NULL;
+
+	page_count = src->size / PAGE_SIZE;
+
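+	/* Called while capturing a GPU error, possibly in interrupt
+	 * context, so allocations must not sleep (GFP_ATOMIC). */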
+	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+	if (dst == NULL)
+		return NULL;
+
+	for (page = 0; page < page_count; page++) {
+		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		if (d == NULL)
+			goto unwind;
+		s = kmap_atomic(src_priv->pages[page], KM_USER0);
+		memcpy(d, s, PAGE_SIZE);
+		kunmap_atomic(s, KM_USER0);
+		dst->pages[page] = d;
+	}
+	dst->page_count = page_count;
+	dst->gtt_offset = src_priv->gtt_offset;
+
+	return dst;
+
+unwind:
+	while (page--)
+		kfree(dst->pages[page]);
+	kfree(dst);
+	return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+	int page;
+
+	if (obj == NULL)
+		return;
+
+	for (page = 0; page < obj->page_count; page++)
+		kfree(obj->pages[page]);
+
+	kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+		      struct drm_i915_error_state *error)
+{
+	i915_error_object_free(error->batchbuffer[0]);
+	i915_error_object_free(error->batchbuffer[1]);
+	i915_error_object_free(error->ringbuffer);
+	kfree(error->active_bo);
+	kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+	u32 cmd;
+
+	if (IS_I830(dev) || IS_845G(dev))
+		cmd = MI_BATCH_BUFFER;
+	else if (IS_I965G(dev))
+		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+		       MI_BATCH_NON_SECURE_I965);
+	else
+		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
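+	/* A dispatch is encoded as the command dword followed by the
+	 * batch address, hence returning ring[1] below. */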
+	return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 head, bbaddr;
+	u32 *ring;
+
+	/* Locate the current position in the ringbuffer and walk back
+	 * to find the most recently dispatched batch buffer.
+	 */
+	bbaddr = 0;
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	ring = (u32 *)(dev_priv->ring.virtual_start + head);
+
+	while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+		bbaddr = i915_get_bbaddr(dev, ring);
+		if (bbaddr)
+			break;
+	}
+
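+	/* Nothing found before HEAD: the ring may have wrapped, so scan
+	 * back from the very end of the ring as well. */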
+	if (bbaddr == 0) {
+		ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
+		while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+			bbaddr = i915_get_bbaddr(dev, ring);
+			if (bbaddr)
+				break;
+		}
+	}
+
+	return bbaddr;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -388,19 +567,26 @@ static void i915_error_work_func(struct work_struct *work)
 static void i915_capture_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
 	struct drm_i915_error_state *error;
+	struct drm_gem_object *batchbuffer[2];
 	unsigned long flags;
+	u32 bbaddr;
+	int count;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	if (dev_priv->first_error)
-		goto out;
+	error = dev_priv->first_error;
+	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+	if (error)
+		return;
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
-		DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
-		goto out;
+		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+		return;
 	}
 
+	error->seqno = i915_get_gem_seqno(dev);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +597,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->ipehr = I915_READ(IPEHR);
 		error->instdone = I915_READ(INSTDONE);
 		error->acthd = I915_READ(ACTHD);
+		error->bbaddr = 0;
 	} else {
 		error->ipeir = I915_READ(IPEIR_I965);
 		error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +605,105 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->instps = I915_READ(INSTPS);
 		error->instdone1 = I915_READ(INSTDONE1);
 		error->acthd = I915_READ(ACTHD_I965);
+		error->bbaddr = I915_READ64(BB_ADDR);
 	}
 
-	do_gettimeofday(&error->time);
+	bbaddr = i915_ringbuffer_last_batch(dev);
+
+	/* Grab the current batchbuffer, most likely to have crashed. */
+	batchbuffer[0] = NULL;
+	batchbuffer[1] = NULL;
+	count = 0;
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		struct drm_gem_object *obj = obj_priv->obj;
+
+		if (batchbuffer[0] == NULL &&
+		    bbaddr >= obj_priv->gtt_offset &&
+		    bbaddr < obj_priv->gtt_offset + obj->size)
+			batchbuffer[0] = obj;
+
+		if (batchbuffer[1] == NULL &&
+		    error->acthd >= obj_priv->gtt_offset &&
+		    error->acthd < obj_priv->gtt_offset + obj->size &&
+		    batchbuffer[0] != obj)
+			batchbuffer[1] = obj;
 
-	dev_priv->first_error = error;
+		count++;
+	}
+
+	/* We need to copy these to an anonymous buffer as the simplest
+	 * method to avoid being overwritten by userspace.
+	 */
+	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+	/* Record the ringbuffer */
+	error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+
+	/* Record buffers on the active list. */
+	error->active_bo = NULL;
+	error->active_bo_count = 0;
+
+	if (count)
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+					   GFP_ATOMIC);
+
+	if (error->active_bo) {
+		int i = 0;
+		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			error->active_bo[i].size = obj->size;
+			error->active_bo[i].name = obj->name;
+			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+			error->active_bo[i].read_domains = obj->read_domains;
+			error->active_bo[i].write_domain = obj->write_domain;
+			error->active_bo[i].fence_reg = obj_priv->fence_reg;
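+			/* pinned: 0 = unpinned, 1 = pinned by the kernel,
+			 * -1 = pinned by userspace */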
+			error->active_bo[i].pinned = 0;
+			if (obj_priv->pin_count > 0)
+				error->active_bo[i].pinned = 1;
+			if (obj_priv->user_pin_count > 0)
+				error->active_bo[i].pinned = -1;
+			error->active_bo[i].tiling = obj_priv->tiling_mode;
+			error->active_bo[i].dirty = obj_priv->dirty;
+			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+			if (++i == count)
+				break;
+		}
+		error->active_bo_count = i;
+	}
 
-out:
+	do_gettimeofday(&error->time);
+
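+	/* Publish the capture only if no error state was recorded while
+	 * we were collecting; otherwise free our copy below. */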
+	spin_lock_irqsave(&dev_priv->error_lock, flags);
+	if (dev_priv->first_error == NULL) {
+		dev_priv->first_error = error;
+		error = NULL;
+	}
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+	if (error)
+		i915_error_state_free(dev, error);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error;
+
+	spin_lock(&dev_priv->error_lock);
+	error = dev_priv->first_error;
+	dev_priv->first_error = NULL;
+	spin_unlock(&dev_priv->error_lock);
+
+	if (error)
+		i915_error_state_free(dev, error);
 }
 
 /**
@@ -1064,6 +1342,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
 	(void) I915_READ(SDEIER);
 
+	if (IS_IRONLAKE_M(dev)) {
+		/* Clear & enable PCU event interrupts */
+		I915_WRITE(DEIIR, DE_PCU_EVENT);
+		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+	}
+
 	return 0;
 }
 