diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-14 03:39:08 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-14 03:39:08 -0400 |
commit | 2d65a9f48fcdf7866aab6457bc707ca233e0c791 (patch) | |
tree | f93e5838d6ac2e59434367f4ff905f7d9c45fc2b /drivers/gpu/drm/i915/i915_gpu_error.c | |
parent | da92da3638a04894afdca8b99e973ddd20268471 (diff) | |
parent | dfda0df3426483cf5fc7441f23f318edbabecb03 (diff) |
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This is the main git pull for the drm,
I pretty much froze major pulls at -rc5/6 time, and haven't had much
fallout, so will probably continue doing that.
Lots of changes all over, big internal header cleanup to make it clear
drm features are legacy things and what are things that modern KMS
drivers should be using. Also big move to use the new generic fences
in all the TTM drivers.
core:
atomic prep work,
vblank rework changes, allows immediate vblank disables
major header reworking and cleanups to better delineate legacy
interfaces from what KMS drivers should be using.
cursor planes locking fixes
ttm:
move to generic fences (affects all TTM drivers)
ppc64 caching fixes
radeon:
userptr support,
uvd for old asics,
reset rework for fence changes
better buffer placement changes,
dpm feature enablement
hdmi audio support fixes
intel:
Cherryview work,
180 degree rotation,
skylake prep work,
execlist command submission
full ppgtt prep work
cursor improvements
edid caching,
vdd handling improvements
nouveau:
fence reworking
kepler memory clock work
gt21x clock work
fan control improvements
hdmi infoframe fixes
DP audio
ast:
ppc64 fixes
caching fix
rcar:
rcar-du DT support
ipuv3:
prep work for capture support
msm:
LVDS support for mdp4, new panel, gpu refactoring
exynos:
exynos3250 SoC support, drop bad mmap interface,
mipi dsi changes, and component match support"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (640 commits)
drm/mst: rework payload table allocation to conform better.
drm/ast: Fix HW cursor image
drm/radeon/kv: add uvd/vce info to dpm debugfs output
drm/radeon/ci: add uvd/vce info to dpm debugfs output
drm/radeon: export reservation_object from dmabuf to ttm
drm/radeon: cope with foreign fences inside the reservation object
drm/radeon: cope with foreign fences inside display
drm/core: use helper to check driver features
drm/radeon/cik: write gfx ucode version to ucode addr reg
drm/radeon/si: print full CS when we hit a packet 0
drm/radeon: remove unnecessary includes
drm/radeon/combios: declare legacy_connector_convert as static
drm/radeon/atombios: declare connector convert tables as static
drm/radeon: drop btc_get_max_clock_from_voltage_dependency_table
drm/radeon/dpm: drop clk/voltage dependency filters for BTC
drm/radeon/dpm: drop clk/voltage dependency filters for CI
drm/radeon/dpm: drop clk/voltage dependency filters for SI
drm/radeon/dpm: drop clk/voltage dependency filters for NI
drm/radeon: disable audio when we disable hdmi (v2)
drm/radeon: split audio enable between eg and r600 (v2)
...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gpu_error.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gpu_error.c | 196 |
1 file changed, 129 insertions, 67 deletions
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index eab41f9390f8..2c87a797213f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, | |||
192 | struct drm_i915_error_buffer *err, | 192 | struct drm_i915_error_buffer *err, |
193 | int count) | 193 | int count) |
194 | { | 194 | { |
195 | err_printf(m, "%s [%d]:\n", name, count); | 195 | err_printf(m, " %s [%d]:\n", name, count); |
196 | 196 | ||
197 | while (count--) { | 197 | while (count--) { |
198 | err_printf(m, " %08x %8u %02x %02x %x %x", | 198 | err_printf(m, " %08x %8u %02x %02x %x %x", |
199 | err->gtt_offset, | 199 | err->gtt_offset, |
200 | err->size, | 200 | err->size, |
201 | err->read_domains, | 201 | err->read_domains, |
@@ -208,7 +208,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, | |||
208 | err_puts(m, err->userptr ? " userptr" : ""); | 208 | err_puts(m, err->userptr ? " userptr" : ""); |
209 | err_puts(m, err->ring != -1 ? " " : ""); | 209 | err_puts(m, err->ring != -1 ? " " : ""); |
210 | err_puts(m, ring_str(err->ring)); | 210 | err_puts(m, ring_str(err->ring)); |
211 | err_puts(m, i915_cache_level_str(err->cache_level)); | 211 | err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); |
212 | 212 | ||
213 | if (err->name) | 213 | if (err->name) |
214 | err_printf(m, " (name: %d)", err->name); | 214 | err_printf(m, " (name: %d)", err->name); |
@@ -393,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
393 | i915_ring_error_state(m, dev, &error->ring[i]); | 393 | i915_ring_error_state(m, dev, &error->ring[i]); |
394 | } | 394 | } |
395 | 395 | ||
396 | if (error->active_bo) | 396 | for (i = 0; i < error->vm_count; i++) { |
397 | err_printf(m, "vm[%d]\n", i); | ||
398 | |||
397 | print_error_buffers(m, "Active", | 399 | print_error_buffers(m, "Active", |
398 | error->active_bo[0], | 400 | error->active_bo[i], |
399 | error->active_bo_count[0]); | 401 | error->active_bo_count[i]); |
400 | 402 | ||
401 | if (error->pinned_bo) | ||
402 | print_error_buffers(m, "Pinned", | 403 | print_error_buffers(m, "Pinned", |
403 | error->pinned_bo[0], | 404 | error->pinned_bo[i], |
404 | error->pinned_bo_count[0]); | 405 | error->pinned_bo_count[i]); |
406 | } | ||
405 | 407 | ||
406 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { | 408 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { |
407 | obj = error->ring[i].batchbuffer; | 409 | obj = error->ring[i].batchbuffer; |
@@ -492,9 +494,11 @@ out: | |||
492 | } | 494 | } |
493 | 495 | ||
494 | int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, | 496 | int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, |
497 | struct drm_i915_private *i915, | ||
495 | size_t count, loff_t pos) | 498 | size_t count, loff_t pos) |
496 | { | 499 | { |
497 | memset(ebuf, 0, sizeof(*ebuf)); | 500 | memset(ebuf, 0, sizeof(*ebuf)); |
501 | ebuf->i915 = i915; | ||
498 | 502 | ||
499 | /* We need to have enough room to store any i915_error_state printf | 503 | /* We need to have enough room to store any i915_error_state printf |
500 | * so that we can move it to start position. | 504 | * so that we can move it to start position. |
@@ -556,24 +560,54 @@ static void i915_error_state_free(struct kref *error_ref) | |||
556 | } | 560 | } |
557 | 561 | ||
558 | static struct drm_i915_error_object * | 562 | static struct drm_i915_error_object * |
559 | i915_error_object_create_sized(struct drm_i915_private *dev_priv, | 563 | i915_error_object_create(struct drm_i915_private *dev_priv, |
560 | struct drm_i915_gem_object *src, | 564 | struct drm_i915_gem_object *src, |
561 | struct i915_address_space *vm, | 565 | struct i915_address_space *vm) |
562 | const int num_pages) | ||
563 | { | 566 | { |
564 | struct drm_i915_error_object *dst; | 567 | struct drm_i915_error_object *dst; |
565 | int i; | 568 | int num_pages; |
569 | bool use_ggtt; | ||
570 | int i = 0; | ||
566 | u32 reloc_offset; | 571 | u32 reloc_offset; |
567 | 572 | ||
568 | if (src == NULL || src->pages == NULL) | 573 | if (src == NULL || src->pages == NULL) |
569 | return NULL; | 574 | return NULL; |
570 | 575 | ||
576 | num_pages = src->base.size >> PAGE_SHIFT; | ||
577 | |||
571 | dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); | 578 | dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); |
572 | if (dst == NULL) | 579 | if (dst == NULL) |
573 | return NULL; | 580 | return NULL; |
574 | 581 | ||
575 | reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm); | 582 | if (i915_gem_obj_bound(src, vm)) |
576 | for (i = 0; i < num_pages; i++) { | 583 | dst->gtt_offset = i915_gem_obj_offset(src, vm); |
584 | else | ||
585 | dst->gtt_offset = -1; | ||
586 | |||
587 | reloc_offset = dst->gtt_offset; | ||
588 | use_ggtt = (src->cache_level == I915_CACHE_NONE && | ||
589 | i915_is_ggtt(vm) && | ||
590 | src->has_global_gtt_mapping && | ||
591 | reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end); | ||
592 | |||
593 | /* Cannot access stolen address directly, try to use the aperture */ | ||
594 | if (src->stolen) { | ||
595 | use_ggtt = true; | ||
596 | |||
597 | if (!src->has_global_gtt_mapping) | ||
598 | goto unwind; | ||
599 | |||
600 | reloc_offset = i915_gem_obj_ggtt_offset(src); | ||
601 | if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end) | ||
602 | goto unwind; | ||
603 | } | ||
604 | |||
605 | /* Cannot access snooped pages through the aperture */ | ||
606 | if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev)) | ||
607 | goto unwind; | ||
608 | |||
609 | dst->page_count = num_pages; | ||
610 | while (num_pages--) { | ||
577 | unsigned long flags; | 611 | unsigned long flags; |
578 | void *d; | 612 | void *d; |
579 | 613 | ||
@@ -582,10 +616,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, | |||
582 | goto unwind; | 616 | goto unwind; |
583 | 617 | ||
584 | local_irq_save(flags); | 618 | local_irq_save(flags); |
585 | if (src->cache_level == I915_CACHE_NONE && | 619 | if (use_ggtt) { |
586 | reloc_offset < dev_priv->gtt.mappable_end && | ||
587 | src->has_global_gtt_mapping && | ||
588 | i915_is_ggtt(vm)) { | ||
589 | void __iomem *s; | 620 | void __iomem *s; |
590 | 621 | ||
591 | /* Simply ignore tiling or any overlapping fence. | 622 | /* Simply ignore tiling or any overlapping fence. |
@@ -597,14 +628,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, | |||
597 | reloc_offset); | 628 | reloc_offset); |
598 | memcpy_fromio(d, s, PAGE_SIZE); | 629 | memcpy_fromio(d, s, PAGE_SIZE); |
599 | io_mapping_unmap_atomic(s); | 630 | io_mapping_unmap_atomic(s); |
600 | } else if (src->stolen) { | ||
601 | unsigned long offset; | ||
602 | |||
603 | offset = dev_priv->mm.stolen_base; | ||
604 | offset += src->stolen->start; | ||
605 | offset += i << PAGE_SHIFT; | ||
606 | |||
607 | memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); | ||
608 | } else { | 631 | } else { |
609 | struct page *page; | 632 | struct page *page; |
610 | void *s; | 633 | void *s; |
@@ -621,11 +644,9 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, | |||
621 | } | 644 | } |
622 | local_irq_restore(flags); | 645 | local_irq_restore(flags); |
623 | 646 | ||
624 | dst->pages[i] = d; | 647 | dst->pages[i++] = d; |
625 | |||
626 | reloc_offset += PAGE_SIZE; | 648 | reloc_offset += PAGE_SIZE; |
627 | } | 649 | } |
628 | dst->page_count = num_pages; | ||
629 | 650 | ||
630 | return dst; | 651 | return dst; |
631 | 652 | ||
@@ -635,22 +656,19 @@ unwind: | |||
635 | kfree(dst); | 656 | kfree(dst); |
636 | return NULL; | 657 | return NULL; |
637 | } | 658 | } |
638 | #define i915_error_object_create(dev_priv, src, vm) \ | ||
639 | i915_error_object_create_sized((dev_priv), (src), (vm), \ | ||
640 | (src)->base.size>>PAGE_SHIFT) | ||
641 | |||
642 | #define i915_error_ggtt_object_create(dev_priv, src) \ | 659 | #define i915_error_ggtt_object_create(dev_priv, src) \ |
643 | i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \ | 660 | i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base) |
644 | (src)->base.size>>PAGE_SHIFT) | ||
645 | 661 | ||
646 | static void capture_bo(struct drm_i915_error_buffer *err, | 662 | static void capture_bo(struct drm_i915_error_buffer *err, |
647 | struct drm_i915_gem_object *obj) | 663 | struct i915_vma *vma) |
648 | { | 664 | { |
665 | struct drm_i915_gem_object *obj = vma->obj; | ||
666 | |||
649 | err->size = obj->base.size; | 667 | err->size = obj->base.size; |
650 | err->name = obj->base.name; | 668 | err->name = obj->base.name; |
651 | err->rseqno = obj->last_read_seqno; | 669 | err->rseqno = obj->last_read_seqno; |
652 | err->wseqno = obj->last_write_seqno; | 670 | err->wseqno = obj->last_write_seqno; |
653 | err->gtt_offset = i915_gem_obj_ggtt_offset(obj); | 671 | err->gtt_offset = vma->node.start; |
654 | err->read_domains = obj->base.read_domains; | 672 | err->read_domains = obj->base.read_domains; |
655 | err->write_domain = obj->base.write_domain; | 673 | err->write_domain = obj->base.write_domain; |
656 | err->fence_reg = obj->fence_reg; | 674 | err->fence_reg = obj->fence_reg; |
@@ -674,7 +692,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, | |||
674 | int i = 0; | 692 | int i = 0; |
675 | 693 | ||
676 | list_for_each_entry(vma, head, mm_list) { | 694 | list_for_each_entry(vma, head, mm_list) { |
677 | capture_bo(err++, vma->obj); | 695 | capture_bo(err++, vma); |
678 | if (++i == count) | 696 | if (++i == count) |
679 | break; | 697 | break; |
680 | } | 698 | } |
@@ -683,21 +701,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, | |||
683 | } | 701 | } |
684 | 702 | ||
685 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, | 703 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, |
686 | int count, struct list_head *head) | 704 | int count, struct list_head *head, |
705 | struct i915_address_space *vm) | ||
687 | { | 706 | { |
688 | struct drm_i915_gem_object *obj; | 707 | struct drm_i915_gem_object *obj; |
689 | int i = 0; | 708 | struct drm_i915_error_buffer * const first = err; |
709 | struct drm_i915_error_buffer * const last = err + count; | ||
690 | 710 | ||
691 | list_for_each_entry(obj, head, global_list) { | 711 | list_for_each_entry(obj, head, global_list) { |
692 | if (!i915_gem_obj_is_pinned(obj)) | 712 | struct i915_vma *vma; |
693 | continue; | ||
694 | 713 | ||
695 | capture_bo(err++, obj); | 714 | if (err == last) |
696 | if (++i == count) | ||
697 | break; | 715 | break; |
716 | |||
717 | list_for_each_entry(vma, &obj->vma_list, vma_link) | ||
718 | if (vma->vm == vm && vma->pin_count > 0) { | ||
719 | capture_bo(err++, vma); | ||
720 | break; | ||
721 | } | ||
698 | } | 722 | } |
699 | 723 | ||
700 | return i; | 724 | return err - first; |
701 | } | 725 | } |
702 | 726 | ||
703 | /* Generate a semi-unique error code. The code is not meant to have meaning, The | 727 | /* Generate a semi-unique error code. The code is not meant to have meaning, The |
@@ -890,9 +914,6 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
890 | ering->hws = I915_READ(mmio); | 914 | ering->hws = I915_READ(mmio); |
891 | } | 915 | } |
892 | 916 | ||
893 | ering->cpu_ring_head = ring->buffer->head; | ||
894 | ering->cpu_ring_tail = ring->buffer->tail; | ||
895 | |||
896 | ering->hangcheck_score = ring->hangcheck.score; | 917 | ering->hangcheck_score = ring->hangcheck.score; |
897 | ering->hangcheck_action = ring->hangcheck.action; | 918 | ering->hangcheck_action = ring->hangcheck.action; |
898 | 919 | ||
@@ -955,6 +976,7 @@ static void i915_gem_record_rings(struct drm_device *dev, | |||
955 | 976 | ||
956 | for (i = 0; i < I915_NUM_RINGS; i++) { | 977 | for (i = 0; i < I915_NUM_RINGS; i++) { |
957 | struct intel_engine_cs *ring = &dev_priv->ring[i]; | 978 | struct intel_engine_cs *ring = &dev_priv->ring[i]; |
979 | struct intel_ringbuffer *rbuf; | ||
958 | 980 | ||
959 | error->ring[i].pid = -1; | 981 | error->ring[i].pid = -1; |
960 | 982 | ||
@@ -967,6 +989,12 @@ static void i915_gem_record_rings(struct drm_device *dev, | |||
967 | 989 | ||
968 | request = i915_gem_find_active_request(ring); | 990 | request = i915_gem_find_active_request(ring); |
969 | if (request) { | 991 | if (request) { |
992 | struct i915_address_space *vm; | ||
993 | |||
994 | vm = request->ctx && request->ctx->ppgtt ? | ||
995 | &request->ctx->ppgtt->base : | ||
996 | &dev_priv->gtt.base; | ||
997 | |||
970 | /* We need to copy these to an anonymous buffer | 998 | /* We need to copy these to an anonymous buffer |
971 | * as the simplest method to avoid being overwritten | 999 | * as the simplest method to avoid being overwritten |
972 | * by userspace. | 1000 | * by userspace. |
@@ -974,12 +1002,9 @@ static void i915_gem_record_rings(struct drm_device *dev, | |||
974 | error->ring[i].batchbuffer = | 1002 | error->ring[i].batchbuffer = |
975 | i915_error_object_create(dev_priv, | 1003 | i915_error_object_create(dev_priv, |
976 | request->batch_obj, | 1004 | request->batch_obj, |
977 | request->ctx ? | 1005 | vm); |
978 | request->ctx->vm : | ||
979 | &dev_priv->gtt.base); | ||
980 | 1006 | ||
981 | if (HAS_BROKEN_CS_TLB(dev_priv->dev) && | 1007 | if (HAS_BROKEN_CS_TLB(dev_priv->dev)) |
982 | ring->scratch.obj) | ||
983 | error->ring[i].wa_batchbuffer = | 1008 | error->ring[i].wa_batchbuffer = |
984 | i915_error_ggtt_object_create(dev_priv, | 1009 | i915_error_ggtt_object_create(dev_priv, |
985 | ring->scratch.obj); | 1010 | ring->scratch.obj); |
@@ -998,12 +1023,27 @@ static void i915_gem_record_rings(struct drm_device *dev, | |||
998 | } | 1023 | } |
999 | } | 1024 | } |
1000 | 1025 | ||
1026 | if (i915.enable_execlists) { | ||
1027 | /* TODO: This is only a small fix to keep basic error | ||
1028 | * capture working, but we need to add more information | ||
1029 | * for it to be useful (e.g. dump the context being | ||
1030 | * executed). | ||
1031 | */ | ||
1032 | if (request) | ||
1033 | rbuf = request->ctx->engine[ring->id].ringbuf; | ||
1034 | else | ||
1035 | rbuf = ring->default_context->engine[ring->id].ringbuf; | ||
1036 | } else | ||
1037 | rbuf = ring->buffer; | ||
1038 | |||
1039 | error->ring[i].cpu_ring_head = rbuf->head; | ||
1040 | error->ring[i].cpu_ring_tail = rbuf->tail; | ||
1041 | |||
1001 | error->ring[i].ringbuffer = | 1042 | error->ring[i].ringbuffer = |
1002 | i915_error_ggtt_object_create(dev_priv, ring->buffer->obj); | 1043 | i915_error_ggtt_object_create(dev_priv, rbuf->obj); |
1003 | 1044 | ||
1004 | if (ring->status_page.obj) | 1045 | error->ring[i].hws_page = |
1005 | error->ring[i].hws_page = | 1046 | i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); |
1006 | i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); | ||
1007 | 1047 | ||
1008 | i915_gem_record_active_context(ring, error, &error->ring[i]); | 1048 | i915_gem_record_active_context(ring, error, &error->ring[i]); |
1009 | 1049 | ||
@@ -1049,9 +1089,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, | |||
1049 | list_for_each_entry(vma, &vm->active_list, mm_list) | 1089 | list_for_each_entry(vma, &vm->active_list, mm_list) |
1050 | i++; | 1090 | i++; |
1051 | error->active_bo_count[ndx] = i; | 1091 | error->active_bo_count[ndx] = i; |
1052 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) | 1092 | |
1053 | if (i915_gem_obj_is_pinned(obj)) | 1093 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
1054 | i++; | 1094 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
1095 | if (vma->vm == vm && vma->pin_count > 0) { | ||
1096 | i++; | ||
1097 | break; | ||
1098 | } | ||
1099 | } | ||
1055 | error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; | 1100 | error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; |
1056 | 1101 | ||
1057 | if (i) { | 1102 | if (i) { |
@@ -1070,7 +1115,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, | |||
1070 | error->pinned_bo_count[ndx] = | 1115 | error->pinned_bo_count[ndx] = |
1071 | capture_pinned_bo(pinned_bo, | 1116 | capture_pinned_bo(pinned_bo, |
1072 | error->pinned_bo_count[ndx], | 1117 | error->pinned_bo_count[ndx], |
1073 | &dev_priv->mm.bound_list); | 1118 | &dev_priv->mm.bound_list, vm); |
1074 | error->active_bo[ndx] = active_bo; | 1119 | error->active_bo[ndx] = active_bo; |
1075 | error->pinned_bo[ndx] = pinned_bo; | 1120 | error->pinned_bo[ndx] = pinned_bo; |
1076 | } | 1121 | } |
@@ -1091,8 +1136,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, | |||
1091 | error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), | 1136 | error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), |
1092 | GFP_ATOMIC); | 1137 | GFP_ATOMIC); |
1093 | 1138 | ||
1094 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) | 1139 | if (error->active_bo == NULL || |
1095 | i915_gem_capture_vm(dev_priv, error, vm, i++); | 1140 | error->pinned_bo == NULL || |
1141 | error->active_bo_count == NULL || | ||
1142 | error->pinned_bo_count == NULL) { | ||
1143 | kfree(error->active_bo); | ||
1144 | kfree(error->active_bo_count); | ||
1145 | kfree(error->pinned_bo); | ||
1146 | kfree(error->pinned_bo_count); | ||
1147 | |||
1148 | error->active_bo = NULL; | ||
1149 | error->active_bo_count = NULL; | ||
1150 | error->pinned_bo = NULL; | ||
1151 | error->pinned_bo_count = NULL; | ||
1152 | } else { | ||
1153 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) | ||
1154 | i915_gem_capture_vm(dev_priv, error, vm, i++); | ||
1155 | |||
1156 | error->vm_count = cnt; | ||
1157 | } | ||
1096 | } | 1158 | } |
1097 | 1159 | ||
1098 | /* Capture all registers which don't fit into another category. */ | 1160 | /* Capture all registers which don't fit into another category. */ |
@@ -1295,11 +1357,11 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
1295 | kref_put(&error->ref, i915_error_state_free); | 1357 | kref_put(&error->ref, i915_error_state_free); |
1296 | } | 1358 | } |
1297 | 1359 | ||
1298 | const char *i915_cache_level_str(int type) | 1360 | const char *i915_cache_level_str(struct drm_i915_private *i915, int type) |
1299 | { | 1361 | { |
1300 | switch (type) { | 1362 | switch (type) { |
1301 | case I915_CACHE_NONE: return " uncached"; | 1363 | case I915_CACHE_NONE: return " uncached"; |
1302 | case I915_CACHE_LLC: return " snooped or LLC"; | 1364 | case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped"; |
1303 | case I915_CACHE_L3_LLC: return " L3+LLC"; | 1365 | case I915_CACHE_L3_LLC: return " L3+LLC"; |
1304 | case I915_CACHE_WT: return " WT"; | 1366 | case I915_CACHE_WT: return " WT"; |
1305 | default: return ""; | 1367 | default: return ""; |