author     Dave Airlie <airlied@redhat.com>    2010-01-10 23:43:16 -0500
committer  Dave Airlie <airlied@redhat.com>    2010-01-10 23:43:16 -0500
commit     f22d6ddaeb8126623d62c828a4d4a96dfc4cbc5c (patch)
tree       f866bf0e2445144208a9884de89b50ca94c43be2 /drivers/gpu
parent     0c9d2c418aa4a45534943c4c9a1c8dda82d3b481 (diff)
parent     40c2298bdcc8b766a39964c44e9a74d16aa95d53 (diff)
Merge branch 'for-airlied' of /ssd/git/drm-nouveau-next into drm-linus
* 'for-airlied' of /ssd/git/drm-nouveau-next: (28 commits)
drm/nv04: Fix set_operation software method.
drm/nouveau: initialise DMA tracking parameters earlier
drm/nouveau: use dma.max rather than pushbuf size for checking GET validity
drm/nv04: differentiate between nv04/nv05
drm/nouveau: Fix null deref in nouveau_fence_emit due to deleted fence
drm/nv50: prevent a possible ctxprog hang
drm/nouveau: have ttm's fault handler called directly
drm/nv50: restore correct cache1 get/put address on fifoctx load
drm/nouveau: create function for "dealing" with gpu lockup
drm/nouveau: remove unused nouveau_channel_idle() function
drm/nouveau: fix handling of fbcon colours in 8bpp
drm/nv04: Context switching fixes.
drm/nouveau: Use the software object for fencing.
drm/nouveau: Allocate a per-channel instance of NV_SW.
drm/nv50: make the blocksize depend on vram size
drm/nouveau: better alignment of bo sizes and use roundup instead of ALIGN
drm/nouveau: Don't skip card take down on nv0x.
drm/nouveau: Implement nv42-nv43 TV load detection.
drm/nouveau: Clean up the nv17-nv4x load detection code a bit.
drm/nv50: fix fillrect color
...
Diffstat (limited to 'drivers/gpu')
29 files changed, 853 insertions, 496 deletions
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index b1bc1ea182b8..1175429da102 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
 	  via debugfs.
 
 menu "I2C encoder or helper chips"
-	depends on DRM && I2C
+	depends on DRM && DRM_KMS_HELPER && I2C
 
 config DRM_I2C_CH7006
 	tristate "Chrontel ch7006 TV encoder"
-	depends on DRM_NOUVEAU
-	default m
+	default m if DRM_NOUVEAU
 	help
 	  Support for Chrontel ch7006 and similar TV encoders, found
 	  on some nVidia video cards.
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0cad6d834eb2..e342a418d434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -33,10 +33,13 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 
+#include <linux/log2.h>
+
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
 	ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
+	if (nvbo->tile)
+		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
+
 	spin_lock(&dev_priv->ttm.bo_list_lock);
 	list_del(&nvbo->head);
 	spin_unlock(&dev_priv->ttm.bo_list_lock);
 	kfree(nvbo);
 }
 
+static void
+nouveau_bo_fixup_align(struct drm_device *dev,
+		       uint32_t tile_mode, uint32_t tile_flags,
+		       int *align, int *size)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/*
+	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
+	 * align to to that as well as the page size. Overallocate memory to
+	 * avoid corruption of other buffer objects.
+	 */
+	if (dev_priv->card_type == NV_50) {
+		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
+		int i;
+
+		switch (tile_flags) {
+		case 0x1800:
+		case 0x2800:
+		case 0x4800:
+		case 0x7a00:
+			*size = roundup(*size, block_size);
+			if (is_power_of_2(block_size)) {
+				*size += 3 * block_size;
+				for (i = 1; i < 10; i++) {
+					*align = 12 * i * block_size;
+					if (!(*align % 65536))
+						break;
+				}
+			} else {
+				*size += 6 * block_size;
+				for (i = 1; i < 10; i++) {
+					*align = 8 * i * block_size;
+					if (!(*align % 65536))
+						break;
+				}
+			}
+			break;
+		default:
+			break;
+		}
+
+	} else {
+		if (tile_mode) {
+			if (dev_priv->chipset >= 0x40) {
+				*align = 65536;
+				*size = roundup(*size, 64 * tile_mode);
+
+			} else if (dev_priv->chipset >= 0x30) {
+				*align = 32768;
+				*size = roundup(*size, 64 * tile_mode);
+
+			} else if (dev_priv->chipset >= 0x20) {
+				*align = 16384;
+				*size = roundup(*size, 64 * tile_mode);
+
+			} else if (dev_priv->chipset >= 0x10) {
+				*align = 16384;
+				*size = roundup(*size, 32 * tile_mode);
+			}
+		}
+	}
+
+	/* ALIGN works only on powers of two. */
+	*size = roundup(*size, PAGE_SIZE);
+
+	if (dev_priv->card_type == NV_50) {
+		*size = roundup(*size, 65536);
+		*align = max(65536, *align);
+	}
+}
+
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	       int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret, n = 0;
+	int ret = 0;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Overallocate memory to
-	 * avoid corruption of other buffer objects.
-	 */
-	switch (tile_flags) {
-	case 0x1800:
-	case 0x2800:
-	case 0x4800:
-	case 0x7a00:
-		if (dev_priv->chipset >= 0xA0) {
-			/* This is based on high end cards with 448 bits
-			 * memory bus, could be different elsewhere.*/
-			size += 6 * 28672;
-			/* 8 * 28672 is the actual alignment requirement,
-			 * but we must also align to page size. */
-			align = 2 * 8 * 28672;
-		} else if (dev_priv->chipset >= 0x90) {
-			size += 3 * 16384;
-			align = 12 * 16834;
-		} else {
-			size += 3 * 8192;
-			/* 12 * 8192 is the actual alignment requirement,
-			 * but we must also align to page size. */
-			align = 2 * 12 * 8192;
-		}
-		break;
-	default:
-		break;
-	}
-
+	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
 	align >>= PAGE_SHIFT;
 
-	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
-	if (dev_priv->card_type == NV_50) {
-		size = (size + 65535) & ~65535;
-		if (align < (65536 / PAGE_SIZE))
-			align = (65536 / PAGE_SIZE);
-	}
-
-	if (flags & TTM_PL_FLAG_VRAM)
-		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
-	if (flags & TTM_PL_FLAG_TT)
-		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
-	nvbo->placement.placement = nvbo->placements;
-	nvbo->placement.busy_placement = nvbo->placements;
-	nvbo->placement.num_placement = n;
-	nvbo->placement.num_busy_placement = n;
+	nouveau_bo_placement_set(nvbo, flags);
 
 	nvbo->channel = chan;
-	nouveau_bo_placement_set(nvbo, flags);
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
 			  ttm_bo_type_device, &nvbo->placement, align, 0,
 			  false, NULL, size, nouveau_bo_del_ttm);
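Editor's note: the new nouveau_bo_fixup_align() helper replaces the open-coded size/alignment arithmetic deleted above, and per the shortlog it deliberately uses roundup() rather than ALIGN(). The reason is that the NV50 block size (nouveau_mem_fb_amount(dev) >> 15) is not a power of two on boards like the 448-bit ones mentioned in the deleted comment. A standalone, compilable sketch of the difference (user-space C, illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

/* Same contract as the kernel's roundup(): round x up to a multiple
 * of m, for any non-zero m. */
static uint32_t roundup_any(uint32_t x, uint32_t m)
{
	return ((x + m - 1) / m) * m;
}

/* Kernel-style ALIGN(): mask arithmetic, correct only when m is 2^k. */
static uint32_t align_pow2(uint32_t x, uint32_t m)
{
	return (x + m - 1) & ~(m - 1);
}

int main(void)
{
	uint32_t block = 28672;	/* 448-bit board: 7 * 4096, not a power of 2 */

	printf("roundup: %u\n", roundup_any(100000, block)); /* 114688, correct */
	printf("ALIGN:   %u\n", align_pow2(100000, block));  /* 102400, not a multiple */
	return 0;
}
```

This is why the helper still keeps the "ALIGN works only on powers of two" comment next to the PAGE_SIZE rounding, where the mask trick would have been safe.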
@@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
  * TTM_PL_{VRAM,TT} directly.
  */
+
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -455,11 +489,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 }
 
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
-		     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+		     int no_wait, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct nouveau_channel *chan;
 	uint64_t src_offset, dst_offset;
 	uint32_t page_count;
@@ -547,7 +582,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
-	placement.placement = &placement_memtype;
+	placement.placement = placement.busy_placement = &placement_memtype;
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
@@ -559,7 +594,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
 	if (ret)
 		goto out;
 
@@ -585,7 +620,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
-	placement.placement = &placement_memtype;
+	placement.placement = placement.busy_placement = &placement_memtype;
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
@@ -597,7 +632,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
 	if (ret)
 		goto out;
 
@@ -612,52 +647,106 @@ out:
 }
 
 static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+		   struct nouveau_tile_reg **new_tile)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_device *dev = dev_priv->dev;
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	uint64_t offset;
 	int ret;
 
-	if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
-	    !nvbo->no_vm) {
-		uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+		/* Nothing to do. */
+		*new_tile = NULL;
+		return 0;
+	}
+
+	offset = new_mem->mm_node->start << PAGE_SHIFT;
 
+	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
 					      offset + dev_priv->vm_vram_base,
 					      new_mem->size, nvbo->tile_flags,
 					      offset);
 		if (ret)
 			return ret;
+
+	} else if (dev_priv->card_type >= NV_10) {
+		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+						nvbo->tile_mode);
 	}
 
+	return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+		      struct nouveau_tile_reg *new_tile,
+		      struct nouveau_tile_reg **old_tile)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	if (dev_priv->card_type >= NV_10 &&
+	    dev_priv->card_type < NV_50) {
+		if (*old_tile)
+			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
+
+		*old_tile = new_tile;
+	}
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+		bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct nouveau_tile_reg *new_tile = NULL;
+	int ret = 0;
+
+	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+	if (ret)
+		return ret;
+
+	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
-	    !dev_priv->channel)
-		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	    !dev_priv->channel) {
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		goto out;
+	}
 
+	/* Fake bo copy. */
 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 		BUG_ON(bo->mem.mm_node != NULL);
 		bo->mem = *new_mem;
 		new_mem->mm_node = NULL;
-		return 0;
+		goto out;
 	}
 
-	if (new_mem->mem_type == TTM_PL_SYSTEM) {
-		if (old_mem->mem_type == TTM_PL_SYSTEM)
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-		if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
-		if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	} else {
-		if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	}
+	/* Hardware assisted copy. */
+	if (new_mem->mem_type == TTM_PL_SYSTEM)
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+	else if (old_mem->mem_type == TTM_PL_SYSTEM)
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+	else
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
 
-	return 0;
+	if (!ret)
+		goto out;
+
+	/* Fallback to software copy. */
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+out:
+	if (ret)
+		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+	else
+		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+
+	return ret;
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9aaa972f8822..343d718a9667 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		return ret;
 	}
 
+	nouveau_dma_pre_init(chan);
+
 	/* Locate channel's user control regs */
 	if (dev_priv->card_type < NV_40)
 		user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	return 0;
 }
 
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t caches;
-	int idle;
-
-	if (!chan) {
-		NV_ERROR(dev, "no channel...\n");
-		return 1;
-	}
-
-	caches = nv_rd32(dev, NV03_PFIFO_CACHES);
-	nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
-
-	if (engine->fifo.channel_id(dev) != chan->id) {
-		struct nouveau_gpuobj *ramfc =
-			chan->ramfc ? chan->ramfc->gpuobj : NULL;
-
-		if (!ramfc) {
-			NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
-			return 1;
-		}
-
-		engine->instmem.prepare_access(dev, false);
-		if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
-			idle = 0;
-		else
-			idle = 1;
-		engine->instmem.finish_access(dev);
-	} else {
-		idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
-			nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
-	}
-
-	nv_wr32(dev, NV03_PFIFO_CACHES, caches);
-	return idle;
-}
-
 /* stops a fifo */
 void
 nouveau_channel_free(struct nouveau_channel *chan)
@@ -414,7 +375,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 		init->subchan[0].grclass = 0x0039;
 	else
 		init->subchan[0].grclass = 0x5039;
-	init->nr_subchan = 1;
+	init->subchan[1].handle = NvSw;
+	init->subchan[1].grclass = NV_SW;
+	init->nr_subchan = 2;
 
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
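Editor's note: because the kernel now binds a software object at channel creation, the channel-allocation ioctl has to advertise it too, so userspace does not bind something else over subchannel 1. A fragment of the reply as this hunk leaves it; only the lines present in the diff are certain, the subchan[0].handle assignment is an assumption based on the handle names in nouveau_dma.h:

```c
/* Fragment, not compilable on its own: subchannel table reported
 * back to userspace by nouveau_ioctl_fifo_alloc() after this change. */
init->subchan[0].handle  = NvM2MF;	/* assumption: not shown in this diff */
init->subchan[0].grclass = 0x5039;	/* 0x0039 on pre-NV50 chipsets */
init->subchan[1].handle  = NvSw;	/* the new per-channel software object */
init->subchan[1].grclass = NV_SW;
init->nr_subchan = 2;			/* was 1 */
```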
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 703553687b20..7afbe8b40d51 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -29,12 +29,22 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 
+void
+nouveau_dma_pre_init(struct nouveau_channel *chan)
+{
+	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+	chan->dma.put = 0;
+	chan->dma.cur = chan->dma.put;
+	chan->dma.free = chan->dma.max - chan->dma.cur;
+}
+
 int
 nouveau_dma_init(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *m2mf = NULL;
+	struct nouveau_gpuobj *nvsw = NULL;
 	int ret, i;
 
 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
+	/* Create an NV_SW object for various sync purposes */
+	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+	if (ret)
+		return ret;
+
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
 	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
 	if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
 		return ret;
 	}
 
-	/* Initialise DMA vars */
-	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
-	chan->dma.put = 0;
-	chan->dma.cur = chan->dma.put;
-	chan->dma.free = chan->dma.max - chan->dma.cur;
-
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
 	OUT_RING(chan, NvNotify0);
 
+	/* Initialise NV_SW */
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		return ret;
+	BEGIN_RING(chan, NvSubSw, 0, 1);
+	OUT_RING(chan, NvSw);
+
 	/* Sit back and pray the channel works.. */
 	FIRE_RING(chan);
 
@@ -113,7 +133,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *get)
 
 	val = nvchan_rd32(chan, chan->user_get);
 	if (val < chan->pushbuf_base ||
-	    val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
+	    val > chan->pushbuf_base + (chan->dma.max << 2)) {
 		/* meaningless to dma_wait() except to know whether the
 		 * GPU has stalled or not
 		 */
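Editor's note: READ_GET() now bounds the GPU's GET pointer by dma.max (a dword index) instead of the raw push-buffer BO size, matching the "use dma.max rather than pushbuf size for checking GET validity" item in the shortlog. A compilable sketch of the new window check with both bounds made explicit; the field layout is an assumption mirroring nouveau_dma_pre_init() above:

```c
#include <stdbool.h>
#include <stdint.h>

struct dma_window {
	uint32_t pushbuf_base;	/* start of the push buffer, in bytes */
	uint32_t max;		/* dword index limit: (size >> 2) - 2 */
};

/* Mirrors the updated test in READ_GET(): a GET value is plausible only
 * inside [base, base + (max << 2)]; anything outside that window means
 * the GPU has stalled (or locked up) rather than fetched a command. */
static bool get_is_valid(const struct dma_window *w, uint32_t get)
{
	return get >= w->pushbuf_base &&
	       get <= w->pushbuf_base + (w->max << 2);
}
```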
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 04e85d8f757e..dabfd655f93e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -46,10 +46,11 @@
 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
 	NvSubM2MF = 0,
-	NvSub2D = 1,
-	NvSubCtxSurf2D = 1,
-	NvSubGdiRect = 2,
-	NvSubImageBlit = 3
+	NvSubSw = 1,
+	NvSub2D = 2,
+	NvSubCtxSurf2D = 2,
+	NvSubGdiRect = 3,
+	NvSubImageBlit = 4
 };
 
 /* Object handles. */
@@ -67,6 +68,7 @@ enum {
 	NvClipRect = 0x8000000b,
 	NvGdiRect = 0x8000000c,
 	NvImageBlit = 0x8000000d,
+	NvSw = 0x8000000e,
 
 	/* G80+ display objects */
 	NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5f8cbb79c499..026419fe8791 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -59,11 +59,19 @@ struct nouveau_grctx;
 #define MAX_NUM_DCB_ENTRIES 16
 
 #define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_TILE_NR 15
 
 #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
 #define NV50_VM_BLOCK (512*1024*1024ULL)
 #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
 
+struct nouveau_tile_reg {
+	struct nouveau_fence *fence;
+	uint32_t addr;
+	uint32_t size;
+	bool used;
+};
+
 struct nouveau_bo {
 	struct ttm_buffer_object bo;
 	struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {
 
 	uint32_t tile_mode;
 	uint32_t tile_flags;
+	struct nouveau_tile_reg *tile;
 
 	struct drm_gem_object *gem;
 	struct drm_file *cpu_filp;
@@ -277,8 +286,13 @@ struct nouveau_timer_engine {
 };
 
 struct nouveau_fb_engine {
+	int num_tiles;
+
 	int (*init)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
+
+	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+				  uint32_t size, uint32_t pitch);
 };
 
 struct nouveau_fifo_engine {
284 | struct nouveau_fifo_engine { | 298 | struct nouveau_fifo_engine { |
@@ -292,6 +306,8 @@ struct nouveau_fifo_engine { | |||
292 | void (*disable)(struct drm_device *); | 306 | void (*disable)(struct drm_device *); |
293 | void (*enable)(struct drm_device *); | 307 | void (*enable)(struct drm_device *); |
294 | bool (*reassign)(struct drm_device *, bool enable); | 308 | bool (*reassign)(struct drm_device *, bool enable); |
309 | bool (*cache_flush)(struct drm_device *dev); | ||
310 | bool (*cache_pull)(struct drm_device *dev, bool enable); | ||
295 | 311 | ||
296 | int (*channel_id)(struct drm_device *); | 312 | int (*channel_id)(struct drm_device *); |
297 | 313 | ||
@@ -330,6 +346,9 @@ struct nouveau_pgraph_engine {
 	void (*destroy_context)(struct nouveau_channel *);
 	int (*load_context)(struct nouveau_channel *);
 	int (*unload_context)(struct drm_device *);
+
+	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+				  uint32_t size, uint32_t pitch);
 };
 
 struct nouveau_engine {
335 | struct nouveau_engine { | 354 | struct nouveau_engine { |
@@ -548,6 +567,12 @@ struct drm_nouveau_private { | |||
548 | unsigned long sg_handle; | 567 | unsigned long sg_handle; |
549 | } gart_info; | 568 | } gart_info; |
550 | 569 | ||
570 | /* nv10-nv40 tiling regions */ | ||
571 | struct { | ||
572 | struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; | ||
573 | spinlock_t lock; | ||
574 | } tile; | ||
575 | |||
551 | /* G8x/G9x virtual address space */ | 576 | /* G8x/G9x virtual address space */ |
552 | uint64_t vm_gart_base; | 577 | uint64_t vm_gart_base; |
553 | uint64_t vm_gart_size; | 578 | uint64_t vm_gart_size; |
@@ -685,6 +710,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
 extern int nouveau_mem_init(struct drm_device *);
 extern int nouveau_mem_init_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
+						    uint32_t addr,
+						    uint32_t size,
+						    uint32_t pitch);
+extern void nv10_mem_expire_tiling(struct drm_device *dev,
+				   struct nouveau_tile_reg *tile,
+				   struct nouveau_fence *fence);
 extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
 				   uint32_t size, uint32_t flags,
 				   uint64_t phys);
@@ -713,7 +745,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
 				 struct drm_file *file_priv,
 				 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
 extern void nouveau_channel_free(struct nouveau_channel *);
-extern int nouveau_channel_idle(struct nouveau_channel *chan);
 
 /* nouveau_object.c */
 extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -756,6 +787,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
 				       uint32_t *o_ret);
 extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
 				 struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
+				 struct nouveau_gpuobj **);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 				     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -804,6 +837,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
 #endif
 
 /* nouveau_dma.c */
+extern void nouveau_dma_pre_init(struct nouveau_channel *);
 extern int nouveau_dma_init(struct nouveau_channel *);
 extern int nouveau_dma_wait(struct nouveau_channel *, int size);
 
@@ -879,16 +913,22 @@ extern void nv04_fb_takedown(struct drm_device *);
 /* nv10_fb.c */
 extern int nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
+extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+				      uint32_t, uint32_t);
 
 /* nv40_fb.c */
 extern int nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
+extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+				      uint32_t, uint32_t);
 
 /* nv04_fifo.c */
 extern int nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern bool nv04_fifo_cache_flush(struct drm_device *);
+extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
 extern int nv04_fifo_channel_id(struct drm_device *);
 extern int nv04_fifo_create_context(struct nouveau_channel *);
 extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -941,6 +981,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int nv10_graph_load_context(struct nouveau_channel *);
 extern int nv10_graph_unload_context(struct drm_device *);
 extern void nv10_graph_context_switch(struct drm_device *);
+extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+					 uint32_t, uint32_t);
 
 /* nv20_graph.c */
 extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -952,6 +994,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
 extern int nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int nv30_graph_init(struct drm_device *);
+extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+					 uint32_t, uint32_t);
 
 /* nv40_graph.c */
 extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -963,6 +1007,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
 extern int nv40_graph_load_context(struct nouveau_channel *);
 extern int nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+					 uint32_t, uint32_t);
 
 /* nv50_graph.c */
 extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -1030,8 +1076,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
 
 /* nv04_dac.c */
 extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
-						 struct drm_connector *connector);
+extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
 extern int nv04_dac_output_offset(struct drm_encoder *encoder);
 extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
 
@@ -1049,9 +1094,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
 
 /* nv17_tv.c */
 extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
-						struct drm_connector *connector,
-						uint32_t pin_mask);
 
 /* nv04_display.c */
 extern int nv04_display_create(struct drm_device *);
@@ -1290,14 +1332,14 @@ nv_two_reg_pll(struct drm_device *dev)
 	return false;
 }
 
-#define NV50_NVSW                      0x0000506e
-#define NV50_NVSW_DMA_SEMAPHORE        0x00000060
-#define NV50_NVSW_SEMAPHORE_OFFSET     0x00000064
-#define NV50_NVSW_SEMAPHORE_ACQUIRE    0x00000068
-#define NV50_NVSW_SEMAPHORE_RELEASE    0x0000006c
-#define NV50_NVSW_DMA_VBLSEM           0x0000018c
-#define NV50_NVSW_VBLSEM_OFFSET        0x00000400
-#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
-#define NV50_NVSW_VBLSEM_RELEASE       0x00000408
+#define NV_SW                          0x0000506e
+#define NV_SW_DMA_SEMAPHORE            0x00000060
+#define NV_SW_SEMAPHORE_OFFSET         0x00000064
+#define NV_SW_SEMAPHORE_ACQUIRE        0x00000068
+#define NV_SW_SEMAPHORE_RELEASE        0x0000006c
+#define NV_SW_DMA_VBLSEM               0x0000018c
+#define NV_SW_VBLSEM_OFFSET            0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE     0x00000404
+#define NV_SW_VBLSEM_RELEASE           0x00000408
 
 #endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 84af25c238b6..0b05c869e0e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info)
 		return 0;
 
 	if (RING_SPACE(chan, 4)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
 
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
 	}
 
 	if (ret) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
 
@@ -212,11 +210,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 
 	mode_cmd.bpp = surface_bpp;
 	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
-	mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
 	mode_cmd.depth = surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
-	size = ALIGN(size, PAGE_SIZE);
+	size = roundup(size, PAGE_SIZE);
 
 	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
 			      0, 0x0000, false, true, &nvbo);
@@ -380,3 +378,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 
 	return 0;
 }
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+
+	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+	info->flags |= FBINFO_HWACCEL_DISABLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 8531140fedbc..462e0b87b4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -43,5 +43,6 @@ void nouveau_fbcon_zfill(struct drm_device *dev);
 int nv04_fbcon_accel_init(struct fb_info *info);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
+void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 #endif /* __NV50_FBCON_H__ */
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index dacac9a0842a..faddf53ff9ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock_irqrestore(&chan->fence.lock, flags);
 
-	BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
 	OUT_RING(chan, fence->sequence);
 	FIRE_RING(chan);
 
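Editor's note: this one-token change depends on everything above. nouveau_dma_init() now creates the per-channel NV_SW object and binds it to subchannel NvSubSw, so the fence sequence write becomes a method on the software object, which the driver handles itself, instead of a method on the M2MF engine. A fragment of the emit path under those assumptions (error paths trimmed, not standalone code):

```c
/* Fragment: the ring traffic for one fence after this change. */
ret = RING_SPACE(chan, 2);	/* one method header + one data word */
if (ret)
	return ret;

/* 0x0050 is the refcnt variant, 0x0150 the fallback; both now land on
 * the NV_SW class bound to NvSubSw at channel initialisation. */
BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
FIRE_RING(chan);
```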
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 18fd8ac9fca7..2009db2426c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 }
 
 struct validate_op {
-	struct nouveau_fence *fence;
 	struct list_head vram_list;
 	struct list_head gart_list;
 	struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 }
 
 static void
-validate_fini(struct validate_op *op, bool success)
+validate_fini(struct validate_op *op, struct nouveau_fence* fence)
 {
-	struct nouveau_fence *fence = op->fence;
-
-	if (unlikely(!success))
-		op->fence = NULL;
-
-	validate_fini_list(&op->vram_list, op->fence);
-	validate_fini_list(&op->gart_list, op->fence);
-	validate_fini_list(&op->both_list, op->fence);
-	nouveau_fence_unref((void *)&fence);
+	validate_fini_list(&op->vram_list, fence);
+	validate_fini_list(&op->gart_list, fence);
+	validate_fini_list(&op->both_list, fence);
 }
 
 static int
@@ -420,10 +413,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	INIT_LIST_HEAD(&op->gart_list);
 	INIT_LIST_HEAD(&op->both_list);
 
-	ret = nouveau_fence_new(chan, &op->fence, false);
-	if (ret)
-		return ret;
-
 	if (nr_buffers == 0)
 		return 0;
 
@@ -541,6 +530,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
 	struct nouveau_channel *chan;
 	struct validate_op op;
+	struct nouveau_fence* fence = 0;
 	uint32_t *pushbuf = NULL;
 	int ret = 0, do_reloc = 0, i;
 
@@ -597,7 +587,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	OUT_RINGp(chan, pushbuf, req->nr_dwords);
 
-	ret = nouveau_fence_emit(op.fence);
+	ret = nouveau_fence_new(chan, &fence, true);
 	if (ret) {
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
@@ -605,7 +595,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(op.fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, NULL, false, false);
 		if (ret) {
 			for (i = 0; i < req->nr_dwords; i++)
 				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -614,7 +604,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 out:
-	validate_fini(&op, ret == 0);
+	validate_fini(&op, fence);
+	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(pushbuf);
 	kfree(bo);
@@ -634,6 +625,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	struct drm_gem_object *gem;
 	struct nouveau_bo *pbbo;
 	struct validate_op op;
+	struct nouveau_fence* fence = 0;
 	int i, ret = 0, do_reloc = 0;
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -772,7 +764,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 		OUT_RING(chan, 0);
 	}
 
-	ret = nouveau_fence_emit(op.fence);
+	ret = nouveau_fence_new(chan, &fence, true);
 	if (ret) {
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
@@ -780,7 +772,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	}
 
 out:
-	validate_fini(&op, ret == 0);
+	validate_fini(&op, fence);
+	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
 
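Editor's note: both pushbuf ioctls now follow the same fence lifecycle, which is what fixes the "null deref in nouveau_fence_emit due to deleted fence" item in the shortlog: the fence no longer lives inside struct validate_op before submission; it is created and emitted in one step afterwards, attached to the validated BOs, and then dropped. A condensed sketch of that order of operations (fragment, not standalone):

```c
struct nouveau_fence *fence = NULL;

/* 1. validate buffers -- no fence exists yet, so nothing can delete it
 *    out from under us while we stall waiting for ring space */
/* 2. OUT_RINGp() the user's commands */

/* 3. create *and* emit the fence in one call; 'true' requests emission */
ret = nouveau_fence_new(chan, &fence, true);
if (ret)
	WIND_RING(chan);	/* back the commands out on failure */

/* 4. attach the fence (or NULL on error) to every validated BO ... */
validate_fini(&op, fence);
/* 5. ... and drop our reference; the BO lists hold their own */
nouveau_fence_unref((void **)&fence);
```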
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 370c72c968d1..919a619ca7fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -635,6 +635,7 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
 
 		if ((nv_rd32(dev, 0x400500) & isb) != isb)
 			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 	}
 
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5158a12f7844..fb9bdd6edf1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
 }
 
 /*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			   uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr;
+	tile->size = size;
+	tile->used = !!pitch;
+	nouveau_fence_unref((void **)&tile->fence);
+
+	if (!pfifo->cache_flush(dev))
+		return;
+
+	pfifo->reassign(dev, false);
+	pfifo->cache_flush(dev);
+	pfifo->cache_pull(dev, false);
+
+	nouveau_wait_for_idle(dev);
+
+	pgraph->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_region_tiling(dev, i, addr, size, pitch);
+
+	pfifo->cache_pull(dev, true);
+	pfifo->reassign(dev, true);
+}
+
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+		    uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
+	int i;
+
+	spin_lock(&dev_priv->tile.lock);
+
+	for (i = 0; i < pfb->num_tiles; i++) {
+		if (tile[i].used)
+			/* Tile region in use. */
+			continue;
+
+		if (tile[i].fence &&
+		    !nouveau_fence_signalled(tile[i].fence, NULL))
+			/* Pending tile region. */
+			continue;
+
+		if (max(tile[i].addr, addr) <
+		    min(tile[i].addr + tile[i].size, addr + size))
+			/* Kill an intersecting tile region. */
+			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+
+		if (pitch && !found) {
+			/* Free tile region. */
+			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
+			found = &tile[i];
+		}
+	}
+
+	spin_unlock(&dev_priv->tile.lock);
+
+	return found;
+}
+
+void
+nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
+		       struct nouveau_fence *fence)
+{
+	if (fence) {
+		/* Mark it as pending. */
+		tile->fence = fence;
+		nouveau_fence_ref(fence);
+	}
+
+	tile->used = false;
+}
+
+/*
  * NV50 VM helpers
  */
 int
@@ -513,6 +599,7 @@ nouveau_mem_init(struct drm_device *dev)
 
 	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
 	spin_lock_init(&dev_priv->ttm.bo_list_lock);
+	spin_lock_init(&dev_priv->tile.lock);
 
 	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
 
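Editor's note: taken together with the nouveau_bo.c changes, the intended lifecycle of a tile region looks roughly like the fragment below (same assumptions as the hunks above; nv10_mem_set_tiling() can return NULL when every slot is in use or still pending on a fence):

```c
/* Claim a region when a tiled BO lands in VRAM (see nouveau_bo_vm_bind). */
struct nouveau_tile_reg *tile =
	nv10_mem_set_tiling(dev, offset, new_mem->size, nvbo->tile_mode);
if (!tile) {
	/* No free PFB/PGRAPH slot: the BO simply stays untiled. */
}

/* Release it when the BO moves or is destroyed. With a fence the slot
 * stays "pending" and is only reused once the fence has signalled. */
nv10_mem_expire_tiling(dev, tile, bo->sync_obj);	/* deferred reuse */
nv10_mem_expire_tiling(dev, nvbo->tile, NULL);		/* immediate, destructor */
```

The max/min comparison in nv10_mem_set_tiling() is the standard half-open interval-overlap test: two ranges [a, a+as) and [b, b+bs) intersect iff max(a, b) < min(a+as, b+bs).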
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 93379bb81bea..6c2cf81716df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -881,7 +881,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 	return 0;
 }
 
-static int
+int
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
 		      struct nouveau_gpuobj **gpuobj_ret)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index fa1b0e7165b9..251f1b3b38b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -349,19 +349,19 @@ | |||
349 | #define NV04_PGRAPH_BLEND 0x00400824 | 349 | #define NV04_PGRAPH_BLEND 0x00400824 |
350 | #define NV04_PGRAPH_STORED_FMT 0x00400830 | 350 | #define NV04_PGRAPH_STORED_FMT 0x00400830 |
351 | #define NV04_PGRAPH_PATT_COLORRAM 0x00400900 | 351 | #define NV04_PGRAPH_PATT_COLORRAM 0x00400900 |
352 | #define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) | 352 | #define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16)) |
353 | #define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) | 353 | #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) |
354 | #define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) | 354 | #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) |
355 | #define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) | 355 | #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) |
356 | #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) | 356 | #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) |
357 | #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) | 357 | #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) |
358 | #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) | 358 | #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) |
359 | #define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) | 359 | #define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) |
360 | #define NV04_PGRAPH_U_RAM 0x00400D00 | 360 | #define NV04_PGRAPH_U_RAM 0x00400D00 |
361 | #define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) | 361 | #define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16)) |
362 | #define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) | 362 | #define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16)) |
363 | #define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) | 363 | #define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16)) |
364 | #define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) | 364 | #define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) |
365 | #define NV04_PGRAPH_V_RAM 0x00400D40 | 365 | #define NV04_PGRAPH_V_RAM 0x00400D40 |
366 | #define NV04_PGRAPH_W_RAM 0x00400D80 | 366 | #define NV04_PGRAPH_W_RAM 0x00400D80 |
367 | #define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 | 367 | #define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 |
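The renames reflect that the 0x400900 block is the NV20-style per-region tiling group (the NV47 variants live in the 0xD00 block), with each region i spanning a 16-byte group of TILE/TLIMIT/TSIZE/TSTATUS. A quick standalone check of the layout:

#include <stdio.h>

#define NV20_PGRAPH_TILE(i)    (0x00400900 + (i) * 16)
#define NV20_PGRAPH_TLIMIT(i)  (0x00400904 + (i) * 16)
#define NV20_PGRAPH_TSIZE(i)   (0x00400908 + (i) * 16)
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i) * 16)

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("region %d: tile=%#x limit=%#x size=%#x status=%#x\n",
		       i, NV20_PGRAPH_TILE(i), NV20_PGRAPH_TLIMIT(i),
		       NV20_PGRAPH_TSIZE(i), NV20_PGRAPH_TSTATUS(i));
	return 0;
}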
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e76ec2d207a9..09b9a46dfc0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
76 | engine->fifo.disable = nv04_fifo_disable; | 76 | engine->fifo.disable = nv04_fifo_disable; |
77 | engine->fifo.enable = nv04_fifo_enable; | 77 | engine->fifo.enable = nv04_fifo_enable; |
78 | engine->fifo.reassign = nv04_fifo_reassign; | 78 | engine->fifo.reassign = nv04_fifo_reassign; |
79 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
80 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
79 | engine->fifo.channel_id = nv04_fifo_channel_id; | 81 | engine->fifo.channel_id = nv04_fifo_channel_id; |
80 | engine->fifo.create_context = nv04_fifo_create_context; | 82 | engine->fifo.create_context = nv04_fifo_create_context; |
81 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | 83 | engine->fifo.destroy_context = nv04_fifo_destroy_context; |
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
100 | engine->timer.takedown = nv04_timer_takedown; | 102 | engine->timer.takedown = nv04_timer_takedown; |
101 | engine->fb.init = nv10_fb_init; | 103 | engine->fb.init = nv10_fb_init; |
102 | engine->fb.takedown = nv10_fb_takedown; | 104 | engine->fb.takedown = nv10_fb_takedown; |
105 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | ||
103 | engine->graph.grclass = nv10_graph_grclass; | 106 | engine->graph.grclass = nv10_graph_grclass; |
104 | engine->graph.init = nv10_graph_init; | 107 | engine->graph.init = nv10_graph_init; |
105 | engine->graph.takedown = nv10_graph_takedown; | 108 | engine->graph.takedown = nv10_graph_takedown; |
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
109 | engine->graph.fifo_access = nv04_graph_fifo_access; | 112 | engine->graph.fifo_access = nv04_graph_fifo_access; |
110 | engine->graph.load_context = nv10_graph_load_context; | 113 | engine->graph.load_context = nv10_graph_load_context; |
111 | engine->graph.unload_context = nv10_graph_unload_context; | 114 | engine->graph.unload_context = nv10_graph_unload_context; |
115 | engine->graph.set_region_tiling = nv10_graph_set_region_tiling; | ||
112 | engine->fifo.channels = 32; | 116 | engine->fifo.channels = 32; |
113 | engine->fifo.init = nv10_fifo_init; | 117 | engine->fifo.init = nv10_fifo_init; |
114 | engine->fifo.takedown = nouveau_stub_takedown; | 118 | engine->fifo.takedown = nouveau_stub_takedown; |
115 | engine->fifo.disable = nv04_fifo_disable; | 119 | engine->fifo.disable = nv04_fifo_disable; |
116 | engine->fifo.enable = nv04_fifo_enable; | 120 | engine->fifo.enable = nv04_fifo_enable; |
117 | engine->fifo.reassign = nv04_fifo_reassign; | 121 | engine->fifo.reassign = nv04_fifo_reassign; |
122 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
123 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
118 | engine->fifo.channel_id = nv10_fifo_channel_id; | 124 | engine->fifo.channel_id = nv10_fifo_channel_id; |
119 | engine->fifo.create_context = nv10_fifo_create_context; | 125 | engine->fifo.create_context = nv10_fifo_create_context; |
120 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 126 | engine->fifo.destroy_context = nv10_fifo_destroy_context; |
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
139 | engine->timer.takedown = nv04_timer_takedown; | 145 | engine->timer.takedown = nv04_timer_takedown; |
140 | engine->fb.init = nv10_fb_init; | 146 | engine->fb.init = nv10_fb_init; |
141 | engine->fb.takedown = nv10_fb_takedown; | 147 | engine->fb.takedown = nv10_fb_takedown; |
148 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | ||
142 | engine->graph.grclass = nv20_graph_grclass; | 149 | engine->graph.grclass = nv20_graph_grclass; |
143 | engine->graph.init = nv20_graph_init; | 150 | engine->graph.init = nv20_graph_init; |
144 | engine->graph.takedown = nv20_graph_takedown; | 151 | engine->graph.takedown = nv20_graph_takedown; |
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
148 | engine->graph.fifo_access = nv04_graph_fifo_access; | 155 | engine->graph.fifo_access = nv04_graph_fifo_access; |
149 | engine->graph.load_context = nv20_graph_load_context; | 156 | engine->graph.load_context = nv20_graph_load_context; |
150 | engine->graph.unload_context = nv20_graph_unload_context; | 157 | engine->graph.unload_context = nv20_graph_unload_context; |
158 | engine->graph.set_region_tiling = nv20_graph_set_region_tiling; | ||
151 | engine->fifo.channels = 32; | 159 | engine->fifo.channels = 32; |
152 | engine->fifo.init = nv10_fifo_init; | 160 | engine->fifo.init = nv10_fifo_init; |
153 | engine->fifo.takedown = nouveau_stub_takedown; | 161 | engine->fifo.takedown = nouveau_stub_takedown; |
154 | engine->fifo.disable = nv04_fifo_disable; | 162 | engine->fifo.disable = nv04_fifo_disable; |
155 | engine->fifo.enable = nv04_fifo_enable; | 163 | engine->fifo.enable = nv04_fifo_enable; |
156 | engine->fifo.reassign = nv04_fifo_reassign; | 164 | engine->fifo.reassign = nv04_fifo_reassign; |
165 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
166 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
157 | engine->fifo.channel_id = nv10_fifo_channel_id; | 167 | engine->fifo.channel_id = nv10_fifo_channel_id; |
158 | engine->fifo.create_context = nv10_fifo_create_context; | 168 | engine->fifo.create_context = nv10_fifo_create_context; |
159 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 169 | engine->fifo.destroy_context = nv10_fifo_destroy_context; |
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
178 | engine->timer.takedown = nv04_timer_takedown; | 188 | engine->timer.takedown = nv04_timer_takedown; |
179 | engine->fb.init = nv10_fb_init; | 189 | engine->fb.init = nv10_fb_init; |
180 | engine->fb.takedown = nv10_fb_takedown; | 190 | engine->fb.takedown = nv10_fb_takedown; |
191 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | ||
181 | engine->graph.grclass = nv30_graph_grclass; | 192 | engine->graph.grclass = nv30_graph_grclass; |
182 | engine->graph.init = nv30_graph_init; | 193 | engine->graph.init = nv30_graph_init; |
183 | engine->graph.takedown = nv20_graph_takedown; | 194 | engine->graph.takedown = nv20_graph_takedown; |
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
187 | engine->graph.destroy_context = nv20_graph_destroy_context; | 198 | engine->graph.destroy_context = nv20_graph_destroy_context; |
188 | engine->graph.load_context = nv20_graph_load_context; | 199 | engine->graph.load_context = nv20_graph_load_context; |
189 | engine->graph.unload_context = nv20_graph_unload_context; | 200 | engine->graph.unload_context = nv20_graph_unload_context; |
201 | engine->graph.set_region_tiling = nv20_graph_set_region_tiling; | ||
190 | engine->fifo.channels = 32; | 202 | engine->fifo.channels = 32; |
191 | engine->fifo.init = nv10_fifo_init; | 203 | engine->fifo.init = nv10_fifo_init; |
192 | engine->fifo.takedown = nouveau_stub_takedown; | 204 | engine->fifo.takedown = nouveau_stub_takedown; |
193 | engine->fifo.disable = nv04_fifo_disable; | 205 | engine->fifo.disable = nv04_fifo_disable; |
194 | engine->fifo.enable = nv04_fifo_enable; | 206 | engine->fifo.enable = nv04_fifo_enable; |
195 | engine->fifo.reassign = nv04_fifo_reassign; | 207 | engine->fifo.reassign = nv04_fifo_reassign; |
208 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
209 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
196 | engine->fifo.channel_id = nv10_fifo_channel_id; | 210 | engine->fifo.channel_id = nv10_fifo_channel_id; |
197 | engine->fifo.create_context = nv10_fifo_create_context; | 211 | engine->fifo.create_context = nv10_fifo_create_context; |
198 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 212 | engine->fifo.destroy_context = nv10_fifo_destroy_context; |
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
218 | engine->timer.takedown = nv04_timer_takedown; | 232 | engine->timer.takedown = nv04_timer_takedown; |
219 | engine->fb.init = nv40_fb_init; | 233 | engine->fb.init = nv40_fb_init; |
220 | engine->fb.takedown = nv40_fb_takedown; | 234 | engine->fb.takedown = nv40_fb_takedown; |
235 | engine->fb.set_region_tiling = nv40_fb_set_region_tiling; | ||
221 | engine->graph.grclass = nv40_graph_grclass; | 236 | engine->graph.grclass = nv40_graph_grclass; |
222 | engine->graph.init = nv40_graph_init; | 237 | engine->graph.init = nv40_graph_init; |
223 | engine->graph.takedown = nv40_graph_takedown; | 238 | engine->graph.takedown = nv40_graph_takedown; |
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
227 | engine->graph.destroy_context = nv40_graph_destroy_context; | 242 | engine->graph.destroy_context = nv40_graph_destroy_context; |
228 | engine->graph.load_context = nv40_graph_load_context; | 243 | engine->graph.load_context = nv40_graph_load_context; |
229 | engine->graph.unload_context = nv40_graph_unload_context; | 244 | engine->graph.unload_context = nv40_graph_unload_context; |
245 | engine->graph.set_region_tiling = nv40_graph_set_region_tiling; | ||
230 | engine->fifo.channels = 32; | 246 | engine->fifo.channels = 32; |
231 | engine->fifo.init = nv40_fifo_init; | 247 | engine->fifo.init = nv40_fifo_init; |
232 | engine->fifo.takedown = nouveau_stub_takedown; | 248 | engine->fifo.takedown = nouveau_stub_takedown; |
233 | engine->fifo.disable = nv04_fifo_disable; | 249 | engine->fifo.disable = nv04_fifo_disable; |
234 | engine->fifo.enable = nv04_fifo_enable; | 250 | engine->fifo.enable = nv04_fifo_enable; |
235 | engine->fifo.reassign = nv04_fifo_reassign; | 251 | engine->fifo.reassign = nv04_fifo_reassign; |
252 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
253 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
236 | engine->fifo.channel_id = nv10_fifo_channel_id; | 254 | engine->fifo.channel_id = nv10_fifo_channel_id; |
237 | engine->fifo.create_context = nv40_fifo_create_context; | 255 | engine->fifo.create_context = nv40_fifo_create_context; |
238 | engine->fifo.destroy_context = nv40_fifo_destroy_context; | 256 | engine->fifo.destroy_context = nv40_fifo_destroy_context; |
@@ -624,7 +642,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
624 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; | 642 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; |
625 | /* NV04 or NV05 */ | 643 | /* NV04 or NV05 */ |
626 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { | 644 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { |
627 | dev_priv->chipset = 0x04; | 645 | if (reg0 & 0x00f00000) |
646 | dev_priv->chipset = 0x05; | ||
647 | else | ||
648 | dev_priv->chipset = 0x04; | ||
628 | } else | 649 | } else |
629 | dev_priv->chipset = 0xff; | 650 | dev_priv->chipset = 0xff; |
630 | 651 | ||
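NV04 and NV05 share the same PMC_BOOT_0 pattern and are now told apart by bits 20-23. A compilable model of just this branch:

#include <assert.h>
#include <stdint.h>

static int old_chipset(uint32_t reg0)
{
	if ((reg0 & 0xff00fff0) == 0x20004000)
		return (reg0 & 0x00f00000) ? 0x05 : 0x04;
	return 0xff; /* not an NV04/NV05 */
}

int main(void)
{
	assert(old_chipset(0x20004000) == 0x04);
	assert(old_chipset(0x20104000) == 0x05); /* NV05 sets bits 20-23 */
	return 0;
}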
@@ -704,8 +725,8 @@ static void nouveau_close(struct drm_device *dev) | |||
704 | { | 725 | { |
705 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 726 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
706 | 727 | ||
707 | /* In the case of an error dev_priv may not be be allocated yet */ | 728 | /* In the case of an error dev_priv may not be allocated yet */ |
708 | if (dev_priv && dev_priv->card_type) | 729 | if (dev_priv) |
709 | nouveau_card_takedown(dev); | 730 | nouveau_card_takedown(dev); |
710 | } | 731 | } |
711 | 732 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 187eb84e4da5..c385d50f041b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
@@ -28,45 +28,17 @@ | |||
28 | 28 | ||
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | 30 | ||
31 | static struct vm_operations_struct nouveau_ttm_vm_ops; | ||
32 | static const struct vm_operations_struct *ttm_vm_ops; | ||
33 | |||
34 | static int | ||
35 | nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
36 | { | ||
37 | struct ttm_buffer_object *bo = vma->vm_private_data; | ||
38 | int ret; | ||
39 | |||
40 | if (unlikely(bo == NULL)) | ||
41 | return VM_FAULT_NOPAGE; | ||
42 | |||
43 | ret = ttm_vm_ops->fault(vma, vmf); | ||
44 | return ret; | ||
45 | } | ||
46 | |||
47 | int | 31 | int |
48 | nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) | 32 | nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) |
49 | { | 33 | { |
50 | struct drm_file *file_priv = filp->private_data; | 34 | struct drm_file *file_priv = filp->private_data; |
51 | struct drm_nouveau_private *dev_priv = | 35 | struct drm_nouveau_private *dev_priv = |
52 | file_priv->minor->dev->dev_private; | 36 | file_priv->minor->dev->dev_private; |
53 | int ret; | ||
54 | 37 | ||
55 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) | 38 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) |
56 | return drm_mmap(filp, vma); | 39 | return drm_mmap(filp, vma); |
57 | 40 | ||
58 | ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); | 41 | return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); |
59 | if (unlikely(ret != 0)) | ||
60 | return ret; | ||
61 | |||
62 | if (unlikely(ttm_vm_ops == NULL)) { | ||
63 | ttm_vm_ops = vma->vm_ops; | ||
64 | nouveau_ttm_vm_ops = *ttm_vm_ops; | ||
65 | nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault; | ||
66 | } | ||
67 | |||
68 | vma->vm_ops = &nouveau_ttm_vm_ops; | ||
69 | return 0; | ||
70 | } | 42 | } |
71 | 43 | ||
72 | static int | 44 | static int |
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index d9f32879ba38..d0e038d28948 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
@@ -212,16 +212,15 @@ out: | |||
212 | return connector_status_disconnected; | 212 | return connector_status_disconnected; |
213 | } | 213 | } |
214 | 214 | ||
215 | enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | 215 | uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) |
216 | struct drm_connector *connector) | ||
217 | { | 216 | { |
218 | struct drm_device *dev = encoder->dev; | 217 | struct drm_device *dev = encoder->dev; |
219 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 218 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
220 | struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; | 219 | struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; |
221 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); | 220 | uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); |
222 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, | 221 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, |
223 | saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; | 222 | saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; |
224 | int head, present = 0; | 223 | int head; |
225 | 224 | ||
226 | #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) | 225 | #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) |
227 | if (dcb->type == OUTPUT_TV) { | 226 | if (dcb->type == OUTPUT_TV) { |
@@ -287,13 +286,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | |||
287 | temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); | 286 | temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); |
288 | msleep(5); | 287 | msleep(5); |
289 | 288 | ||
290 | temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); | 289 | sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); |
291 | |||
292 | if (dcb->type == OUTPUT_TV) | ||
293 | present = (nv17_tv_detect(encoder, connector, temp) | ||
294 | == connector_status_connected); | ||
295 | else | ||
296 | present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI; | ||
297 | 290 | ||
298 | temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); | 291 | temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); |
299 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, | 292 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, |
@@ -310,15 +303,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | |||
310 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); | 303 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); |
311 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); | 304 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); |
312 | 305 | ||
313 | if (present) { | 306 | return sample; |
314 | NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); | 307 | } |
308 | |||
309 | static enum drm_connector_status | ||
310 | nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
311 | { | ||
312 | struct drm_device *dev = encoder->dev; | ||
313 | struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; | ||
314 | uint32_t sample = nv17_dac_sample_load(encoder); | ||
315 | |||
316 | if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { | ||
317 | NV_INFO(dev, "Load detected on output %c\n", | ||
318 | '@' + ffs(dcb->or)); | ||
315 | return connector_status_connected; | 319 | return connector_status_connected; |
320 | } else { | ||
321 | return connector_status_disconnected; | ||
316 | } | 322 | } |
317 | |||
318 | return connector_status_disconnected; | ||
319 | } | 323 | } |
320 | 324 | ||
321 | |||
322 | static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, | 325 | static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, |
323 | struct drm_display_mode *mode, | 326 | struct drm_display_mode *mode, |
324 | struct drm_display_mode *adjusted_mode) | 327 | struct drm_display_mode *adjusted_mode) |
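With the register poking factored into nv17_dac_sample_load(), detection reduces to a bit test on the returned TEST_CONTROL sample, which the TV path can reuse with its own decode. Roughly as below; the SENSEB_ALLHI value here is illustrative, not the real register definition:

#include <stdint.h>

#define SENSEB_ALLHI 0x10000000u /* illustrative stand-in for
				  * NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI */

static int dac_load_present(uint32_t sample)
{
	return (sample & SENSEB_ALLHI) != 0;
}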
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 09a31071ee58..d910873c1368 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
39 | return; | 39 | return; |
40 | 40 | ||
41 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { | 41 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { |
42 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 42 | nouveau_fbcon_gpu_lockup(info); |
43 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
44 | } | 43 | } |
45 | 44 | ||
46 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 45 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
@@ -62,14 +61,12 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
62 | struct drm_device *dev = par->dev; | 61 | struct drm_device *dev = par->dev; |
63 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 62 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
64 | struct nouveau_channel *chan = dev_priv->channel; | 63 | struct nouveau_channel *chan = dev_priv->channel; |
65 | uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color]; | ||
66 | 64 | ||
67 | if (info->state != FBINFO_STATE_RUNNING) | 65 | if (info->state != FBINFO_STATE_RUNNING) |
68 | return; | 66 | return; |
69 | 67 | ||
70 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { | 68 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { |
71 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 69 | nouveau_fbcon_gpu_lockup(info); |
72 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
73 | } | 70 | } |
74 | 71 | ||
75 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 72 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
@@ -80,7 +77,11 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
80 | BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); | 77 | BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); |
81 | OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); | 78 | OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); |
82 | BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); | 79 | BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); |
83 | OUT_RING(chan, color); | 80 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
81 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) | ||
82 | OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); | ||
83 | else | ||
84 | OUT_RING(chan, rect->color); | ||
84 | BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); | 85 | BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); |
85 | OUT_RING(chan, (rect->dx << 16) | rect->dy); | 86 | OUT_RING(chan, (rect->dx << 16) | rect->dy); |
86 | OUT_RING(chan, (rect->width << 16) | rect->height); | 87 | OUT_RING(chan, (rect->width << 16) | rect->height); |
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
109 | } | 110 | } |
110 | 111 | ||
111 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { | 112 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { |
112 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 113 | nouveau_fbcon_gpu_lockup(info); |
113 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
114 | } | 114 | } |
115 | 115 | ||
116 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 116 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
144 | int iter_len = dsize > 128 ? 128 : dsize; | 144 | int iter_len = dsize > 128 ? 128 : dsize; |
145 | 145 | ||
146 | if (RING_SPACE(chan, iter_len + 1)) { | 146 | if (RING_SPACE(chan, iter_len + 1)) { |
147 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 147 | nouveau_fbcon_gpu_lockup(info); |
148 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
149 | cfb_imageblit(info, image); | 148 | cfb_imageblit(info, image); |
150 | return; | 149 | return; |
151 | } | 150 | } |
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
184 | struct drm_device *dev = par->dev; | 183 | struct drm_device *dev = par->dev; |
185 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 184 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
186 | struct nouveau_channel *chan = dev_priv->channel; | 185 | struct nouveau_channel *chan = dev_priv->channel; |
186 | const int sub = NvSubCtxSurf2D; | ||
187 | int surface_fmt, pattern_fmt, rect_fmt; | 187 | int surface_fmt, pattern_fmt, rect_fmt; |
188 | int ret; | 188 | int ret; |
189 | 189 | ||
@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
242 | return ret; | 242 | return ret; |
243 | 243 | ||
244 | if (RING_SPACE(chan, 49)) { | 244 | if (RING_SPACE(chan, 49)) { |
245 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 245 | nouveau_fbcon_gpu_lockup(info); |
246 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
247 | return 0; | 246 | return 0; |
248 | } | 247 | } |
249 | 248 | ||
250 | BEGIN_RING(chan, 1, 0x0000, 1); | 249 | BEGIN_RING(chan, sub, 0x0000, 1); |
251 | OUT_RING(chan, NvCtxSurf2D); | 250 | OUT_RING(chan, NvCtxSurf2D); |
252 | BEGIN_RING(chan, 1, 0x0184, 2); | 251 | BEGIN_RING(chan, sub, 0x0184, 2); |
253 | OUT_RING(chan, NvDmaFB); | 252 | OUT_RING(chan, NvDmaFB); |
254 | OUT_RING(chan, NvDmaFB); | 253 | OUT_RING(chan, NvDmaFB); |
255 | BEGIN_RING(chan, 1, 0x0300, 4); | 254 | BEGIN_RING(chan, sub, 0x0300, 4); |
256 | OUT_RING(chan, surface_fmt); | 255 | OUT_RING(chan, surface_fmt); |
257 | OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); | 256 | OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); |
258 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); | 257 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); |
259 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); | 258 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); |
260 | 259 | ||
261 | BEGIN_RING(chan, 1, 0x0000, 1); | 260 | BEGIN_RING(chan, sub, 0x0000, 1); |
262 | OUT_RING(chan, NvRop); | 261 | OUT_RING(chan, NvRop); |
263 | BEGIN_RING(chan, 1, 0x0300, 1); | 262 | BEGIN_RING(chan, sub, 0x0300, 1); |
264 | OUT_RING(chan, 0x55); | 263 | OUT_RING(chan, 0x55); |
265 | 264 | ||
266 | BEGIN_RING(chan, 1, 0x0000, 1); | 265 | BEGIN_RING(chan, sub, 0x0000, 1); |
267 | OUT_RING(chan, NvImagePatt); | 266 | OUT_RING(chan, NvImagePatt); |
268 | BEGIN_RING(chan, 1, 0x0300, 8); | 267 | BEGIN_RING(chan, sub, 0x0300, 8); |
269 | OUT_RING(chan, pattern_fmt); | 268 | OUT_RING(chan, pattern_fmt); |
270 | #ifdef __BIG_ENDIAN | 269 | #ifdef __BIG_ENDIAN |
271 | OUT_RING(chan, 2); | 270 | OUT_RING(chan, 2); |
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
279 | OUT_RING(chan, ~0); | 278 | OUT_RING(chan, ~0); |
280 | OUT_RING(chan, ~0); | 279 | OUT_RING(chan, ~0); |
281 | 280 | ||
282 | BEGIN_RING(chan, 1, 0x0000, 1); | 281 | BEGIN_RING(chan, sub, 0x0000, 1); |
283 | OUT_RING(chan, NvClipRect); | 282 | OUT_RING(chan, NvClipRect); |
284 | BEGIN_RING(chan, 1, 0x0300, 2); | 283 | BEGIN_RING(chan, sub, 0x0300, 2); |
285 | OUT_RING(chan, 0); | 284 | OUT_RING(chan, 0); |
286 | OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); | 285 | OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); |
287 | 286 | ||
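The fillrect fix keys the colour source off the framebuffer visual: truecolor and directcolor visuals index the pseudo-palette, while pseudocolor (8bpp fbcon) must pass the raw colour through. A standalone model, with the visual constants as defined in <linux/fb.h>:

#include <stdint.h>

#define FB_VISUAL_TRUECOLOR   2
#define FB_VISUAL_DIRECTCOLOR 4

static uint32_t fillrect_color(int visual, const uint32_t *pseudo_palette,
			       uint32_t color)
{
	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR)
		return pseudo_palette[color]; /* palette index -> pixel value */
	return color;                         /* 8bpp: colour is the pixel */
}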
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 0c3cd53c7313..f31347b8c9b0 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c | |||
@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable) | |||
71 | return (reassign == 1); | 71 | return (reassign == 1); |
72 | } | 72 | } |
73 | 73 | ||
74 | bool | ||
75 | nv04_fifo_cache_flush(struct drm_device *dev) | ||
76 | { | ||
77 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
78 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
79 | uint64_t start = ptimer->read(dev); | ||
80 | |||
81 | do { | ||
82 | if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) == | ||
83 | nv_rd32(dev, NV03_PFIFO_CACHE1_PUT)) | ||
84 | return true; | ||
85 | |||
86 | } while (ptimer->read(dev) - start < 100000000); | ||
87 | |||
88 | NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n"); | ||
89 | |||
90 | return false; | ||
91 | } | ||
92 | |||
93 | bool | ||
94 | nv04_fifo_cache_pull(struct drm_device *dev, bool enable) | ||
95 | { | ||
96 | uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0); | ||
97 | |||
98 | if (enable) { | ||
99 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1); | ||
100 | } else { | ||
101 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1); | ||
102 | nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); | ||
103 | } | ||
104 | |||
105 | return !!(pull & 1); | ||
106 | } | ||
107 | |||
74 | int | 108 | int |
75 | nv04_fifo_channel_id(struct drm_device *dev) | 109 | nv04_fifo_channel_id(struct drm_device *dev) |
76 | { | 110 | { |
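The two new helpers are straightforward: cache_flush() polls until the CACHE1 GET pointer catches up with PUT, bounded by a 100ms timeout on the PTIMER clock, and cache_pull() toggles bit 0 of PULL0 and reports the previous state, also clearing the hash when disabling. A user-space model with a fake register file and clock:

#include <stdint.h>

static uint32_t regs[4]; /* [0]=GET [1]=PUT [2]=PULL0 [3]=HASH (fakes) */
static uint64_t fake_ns; /* stand-in for ptimer->read() */

static int cache_flush(void)
{
	uint64_t start = fake_ns;

	do {
		if (regs[0] == regs[1])  /* GET == PUT: cache drained */
			return 1;
		fake_ns += 1000;         /* the real loop just re-reads */
	} while (fake_ns - start < 100000000ULL);

	return 0; /* timed out */
}

static int cache_pull(int enable)
{
	uint32_t pull = regs[2];

	if (enable) {
		regs[2] = pull | 1;
	} else {
		regs[2] = pull & ~1u;
		regs[3] = 0;             /* clear the hash too */
	}
	return !!(pull & 1);             /* previous state */
}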
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index d561d773c0f4..e260986ea65a 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
@@ -28,6 +28,10 @@ | |||
28 | #include "nouveau_drv.h" | 28 | #include "nouveau_drv.h" |
29 | 29 | ||
30 | static uint32_t nv04_graph_ctx_regs[] = { | 30 | static uint32_t nv04_graph_ctx_regs[] = { |
31 | 0x0040053c, | ||
32 | 0x00400544, | ||
33 | 0x00400540, | ||
34 | 0x00400548, | ||
31 | NV04_PGRAPH_CTX_SWITCH1, | 35 | NV04_PGRAPH_CTX_SWITCH1, |
32 | NV04_PGRAPH_CTX_SWITCH2, | 36 | NV04_PGRAPH_CTX_SWITCH2, |
33 | NV04_PGRAPH_CTX_SWITCH3, | 37 | NV04_PGRAPH_CTX_SWITCH3, |
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = { | |||
102 | NV04_PGRAPH_PATT_COLOR0, | 106 | NV04_PGRAPH_PATT_COLOR0, |
103 | NV04_PGRAPH_PATT_COLOR1, | 107 | NV04_PGRAPH_PATT_COLOR1, |
104 | NV04_PGRAPH_PATT_COLORRAM+0x00, | 108 | NV04_PGRAPH_PATT_COLORRAM+0x00, |
105 | NV04_PGRAPH_PATT_COLORRAM+0x01, | ||
106 | NV04_PGRAPH_PATT_COLORRAM+0x02, | ||
107 | NV04_PGRAPH_PATT_COLORRAM+0x03, | ||
108 | NV04_PGRAPH_PATT_COLORRAM+0x04, | 109 | NV04_PGRAPH_PATT_COLORRAM+0x04, |
109 | NV04_PGRAPH_PATT_COLORRAM+0x05, | ||
110 | NV04_PGRAPH_PATT_COLORRAM+0x06, | ||
111 | NV04_PGRAPH_PATT_COLORRAM+0x07, | ||
112 | NV04_PGRAPH_PATT_COLORRAM+0x08, | 110 | NV04_PGRAPH_PATT_COLORRAM+0x08, |
113 | NV04_PGRAPH_PATT_COLORRAM+0x09, | 111 | NV04_PGRAPH_PATT_COLORRAM+0x0c, |
114 | NV04_PGRAPH_PATT_COLORRAM+0x0A, | ||
115 | NV04_PGRAPH_PATT_COLORRAM+0x0B, | ||
116 | NV04_PGRAPH_PATT_COLORRAM+0x0C, | ||
117 | NV04_PGRAPH_PATT_COLORRAM+0x0D, | ||
118 | NV04_PGRAPH_PATT_COLORRAM+0x0E, | ||
119 | NV04_PGRAPH_PATT_COLORRAM+0x0F, | ||
120 | NV04_PGRAPH_PATT_COLORRAM+0x10, | 112 | NV04_PGRAPH_PATT_COLORRAM+0x10, |
121 | NV04_PGRAPH_PATT_COLORRAM+0x11, | ||
122 | NV04_PGRAPH_PATT_COLORRAM+0x12, | ||
123 | NV04_PGRAPH_PATT_COLORRAM+0x13, | ||
124 | NV04_PGRAPH_PATT_COLORRAM+0x14, | 113 | NV04_PGRAPH_PATT_COLORRAM+0x14, |
125 | NV04_PGRAPH_PATT_COLORRAM+0x15, | ||
126 | NV04_PGRAPH_PATT_COLORRAM+0x16, | ||
127 | NV04_PGRAPH_PATT_COLORRAM+0x17, | ||
128 | NV04_PGRAPH_PATT_COLORRAM+0x18, | 114 | NV04_PGRAPH_PATT_COLORRAM+0x18, |
129 | NV04_PGRAPH_PATT_COLORRAM+0x19, | 115 | NV04_PGRAPH_PATT_COLORRAM+0x1c, |
130 | NV04_PGRAPH_PATT_COLORRAM+0x1A, | ||
131 | NV04_PGRAPH_PATT_COLORRAM+0x1B, | ||
132 | NV04_PGRAPH_PATT_COLORRAM+0x1C, | ||
133 | NV04_PGRAPH_PATT_COLORRAM+0x1D, | ||
134 | NV04_PGRAPH_PATT_COLORRAM+0x1E, | ||
135 | NV04_PGRAPH_PATT_COLORRAM+0x1F, | ||
136 | NV04_PGRAPH_PATT_COLORRAM+0x20, | 116 | NV04_PGRAPH_PATT_COLORRAM+0x20, |
137 | NV04_PGRAPH_PATT_COLORRAM+0x21, | ||
138 | NV04_PGRAPH_PATT_COLORRAM+0x22, | ||
139 | NV04_PGRAPH_PATT_COLORRAM+0x23, | ||
140 | NV04_PGRAPH_PATT_COLORRAM+0x24, | 117 | NV04_PGRAPH_PATT_COLORRAM+0x24, |
141 | NV04_PGRAPH_PATT_COLORRAM+0x25, | ||
142 | NV04_PGRAPH_PATT_COLORRAM+0x26, | ||
143 | NV04_PGRAPH_PATT_COLORRAM+0x27, | ||
144 | NV04_PGRAPH_PATT_COLORRAM+0x28, | 118 | NV04_PGRAPH_PATT_COLORRAM+0x28, |
145 | NV04_PGRAPH_PATT_COLORRAM+0x29, | 119 | NV04_PGRAPH_PATT_COLORRAM+0x2c, |
146 | NV04_PGRAPH_PATT_COLORRAM+0x2A, | ||
147 | NV04_PGRAPH_PATT_COLORRAM+0x2B, | ||
148 | NV04_PGRAPH_PATT_COLORRAM+0x2C, | ||
149 | NV04_PGRAPH_PATT_COLORRAM+0x2D, | ||
150 | NV04_PGRAPH_PATT_COLORRAM+0x2E, | ||
151 | NV04_PGRAPH_PATT_COLORRAM+0x2F, | ||
152 | NV04_PGRAPH_PATT_COLORRAM+0x30, | 120 | NV04_PGRAPH_PATT_COLORRAM+0x30, |
153 | NV04_PGRAPH_PATT_COLORRAM+0x31, | ||
154 | NV04_PGRAPH_PATT_COLORRAM+0x32, | ||
155 | NV04_PGRAPH_PATT_COLORRAM+0x33, | ||
156 | NV04_PGRAPH_PATT_COLORRAM+0x34, | 121 | NV04_PGRAPH_PATT_COLORRAM+0x34, |
157 | NV04_PGRAPH_PATT_COLORRAM+0x35, | ||
158 | NV04_PGRAPH_PATT_COLORRAM+0x36, | ||
159 | NV04_PGRAPH_PATT_COLORRAM+0x37, | ||
160 | NV04_PGRAPH_PATT_COLORRAM+0x38, | 122 | NV04_PGRAPH_PATT_COLORRAM+0x38, |
161 | NV04_PGRAPH_PATT_COLORRAM+0x39, | 123 | NV04_PGRAPH_PATT_COLORRAM+0x3c, |
162 | NV04_PGRAPH_PATT_COLORRAM+0x3A, | 124 | NV04_PGRAPH_PATT_COLORRAM+0x40, |
163 | NV04_PGRAPH_PATT_COLORRAM+0x3B, | 125 | NV04_PGRAPH_PATT_COLORRAM+0x44, |
164 | NV04_PGRAPH_PATT_COLORRAM+0x3C, | 126 | NV04_PGRAPH_PATT_COLORRAM+0x48, |
165 | NV04_PGRAPH_PATT_COLORRAM+0x3D, | 127 | NV04_PGRAPH_PATT_COLORRAM+0x4c, |
166 | NV04_PGRAPH_PATT_COLORRAM+0x3E, | 128 | NV04_PGRAPH_PATT_COLORRAM+0x50, |
167 | NV04_PGRAPH_PATT_COLORRAM+0x3F, | 129 | NV04_PGRAPH_PATT_COLORRAM+0x54, |
130 | NV04_PGRAPH_PATT_COLORRAM+0x58, | ||
131 | NV04_PGRAPH_PATT_COLORRAM+0x5c, | ||
132 | NV04_PGRAPH_PATT_COLORRAM+0x60, | ||
133 | NV04_PGRAPH_PATT_COLORRAM+0x64, | ||
134 | NV04_PGRAPH_PATT_COLORRAM+0x68, | ||
135 | NV04_PGRAPH_PATT_COLORRAM+0x6c, | ||
136 | NV04_PGRAPH_PATT_COLORRAM+0x70, | ||
137 | NV04_PGRAPH_PATT_COLORRAM+0x74, | ||
138 | NV04_PGRAPH_PATT_COLORRAM+0x78, | ||
139 | NV04_PGRAPH_PATT_COLORRAM+0x7c, | ||
140 | NV04_PGRAPH_PATT_COLORRAM+0x80, | ||
141 | NV04_PGRAPH_PATT_COLORRAM+0x84, | ||
142 | NV04_PGRAPH_PATT_COLORRAM+0x88, | ||
143 | NV04_PGRAPH_PATT_COLORRAM+0x8c, | ||
144 | NV04_PGRAPH_PATT_COLORRAM+0x90, | ||
145 | NV04_PGRAPH_PATT_COLORRAM+0x94, | ||
146 | NV04_PGRAPH_PATT_COLORRAM+0x98, | ||
147 | NV04_PGRAPH_PATT_COLORRAM+0x9c, | ||
148 | NV04_PGRAPH_PATT_COLORRAM+0xa0, | ||
149 | NV04_PGRAPH_PATT_COLORRAM+0xa4, | ||
150 | NV04_PGRAPH_PATT_COLORRAM+0xa8, | ||
151 | NV04_PGRAPH_PATT_COLORRAM+0xac, | ||
152 | NV04_PGRAPH_PATT_COLORRAM+0xb0, | ||
153 | NV04_PGRAPH_PATT_COLORRAM+0xb4, | ||
154 | NV04_PGRAPH_PATT_COLORRAM+0xb8, | ||
155 | NV04_PGRAPH_PATT_COLORRAM+0xbc, | ||
156 | NV04_PGRAPH_PATT_COLORRAM+0xc0, | ||
157 | NV04_PGRAPH_PATT_COLORRAM+0xc4, | ||
158 | NV04_PGRAPH_PATT_COLORRAM+0xc8, | ||
159 | NV04_PGRAPH_PATT_COLORRAM+0xcc, | ||
160 | NV04_PGRAPH_PATT_COLORRAM+0xd0, | ||
161 | NV04_PGRAPH_PATT_COLORRAM+0xd4, | ||
162 | NV04_PGRAPH_PATT_COLORRAM+0xd8, | ||
163 | NV04_PGRAPH_PATT_COLORRAM+0xdc, | ||
164 | NV04_PGRAPH_PATT_COLORRAM+0xe0, | ||
165 | NV04_PGRAPH_PATT_COLORRAM+0xe4, | ||
166 | NV04_PGRAPH_PATT_COLORRAM+0xe8, | ||
167 | NV04_PGRAPH_PATT_COLORRAM+0xec, | ||
168 | NV04_PGRAPH_PATT_COLORRAM+0xf0, | ||
169 | NV04_PGRAPH_PATT_COLORRAM+0xf4, | ||
170 | NV04_PGRAPH_PATT_COLORRAM+0xf8, | ||
171 | NV04_PGRAPH_PATT_COLORRAM+0xfc, | ||
168 | NV04_PGRAPH_PATTERN, | 172 | NV04_PGRAPH_PATTERN, |
169 | 0x0040080c, | 173 | 0x0040080c, |
170 | NV04_PGRAPH_PATTERN_SHAPE, | 174 | NV04_PGRAPH_PATTERN_SHAPE, |
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = { | |||
247 | 0x004004f8, | 251 | 0x004004f8, |
248 | 0x0040047c, | 252 | 0x0040047c, |
249 | 0x004004fc, | 253 | 0x004004fc, |
250 | 0x0040053c, | ||
251 | 0x00400544, | ||
252 | 0x00400540, | ||
253 | 0x00400548, | ||
254 | 0x00400560, | ||
255 | 0x00400568, | ||
256 | 0x00400564, | ||
257 | 0x0040056c, | ||
258 | 0x00400534, | 254 | 0x00400534, |
259 | 0x00400538, | 255 | 0x00400538, |
260 | 0x00400514, | 256 | 0x00400514, |
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = { | |||
341 | 0x00400500, | 337 | 0x00400500, |
342 | 0x00400504, | 338 | 0x00400504, |
343 | NV04_PGRAPH_VALID1, | 339 | NV04_PGRAPH_VALID1, |
344 | NV04_PGRAPH_VALID2 | 340 | NV04_PGRAPH_VALID2, |
345 | 341 | NV04_PGRAPH_DEBUG_3 | |
346 | |||
347 | }; | 342 | }; |
348 | 343 | ||
349 | struct graph_state { | 344 | struct graph_state { |
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev) | |||
388 | pgraph->fifo_access(dev, true); | 383 | pgraph->fifo_access(dev, true); |
389 | } | 384 | } |
390 | 385 | ||
386 | static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) | ||
387 | { | ||
388 | int i; | ||
389 | |||
390 | for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) { | ||
391 | if (nv04_graph_ctx_regs[i] == reg) | ||
392 | return &ctx->nv04[i]; | ||
393 | } | ||
394 | |||
395 | return NULL; | ||
396 | } | ||
397 | |||
391 | int nv04_graph_create_context(struct nouveau_channel *chan) | 398 | int nv04_graph_create_context(struct nouveau_channel *chan) |
392 | { | 399 | { |
393 | struct graph_state *pgraph_ctx; | 400 | struct graph_state *pgraph_ctx; |
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan) | |||
398 | if (pgraph_ctx == NULL) | 405 | if (pgraph_ctx == NULL) |
399 | return -ENOMEM; | 406 | return -ENOMEM; |
400 | 407 | ||
401 | /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */ | 408 | *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; |
402 | pgraph_ctx->nv04[0] = 0x0001ffff; | 409 | |
403 | /* is it really needed ??? */ | ||
404 | #if 0 | ||
405 | dev_priv->fifos[channel].pgraph_ctx[1] = | ||
406 | nv_rd32(dev, NV_PGRAPH_DEBUG_4); | ||
407 | dev_priv->fifos[channel].pgraph_ctx[2] = | ||
408 | nv_rd32(dev, 0x004006b0); | ||
409 | #endif | ||
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | 412 | ||
@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan) | |||
429 | nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); | 429 | nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); |
430 | 430 | ||
431 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); | 431 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); |
432 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24); | 432 | |
433 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; | ||
434 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24); | ||
435 | |||
433 | tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); | 436 | tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); |
434 | nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); | 437 | nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); |
438 | |||
435 | return 0; | 439 | return 0; |
436 | } | 440 | } |
437 | 441 | ||
@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev) | |||
494 | nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); | 498 | nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); |
495 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); | 499 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); |
496 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; | 500 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; |
497 | tmp |= dev_priv->engine.fifo.channels << 24; | 501 | tmp |= (dev_priv->engine.fifo.channels - 1) << 24; |
498 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); | 502 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); |
499 | 503 | ||
500 | /* These don't belong here, they're part of a per-channel context */ | 504 | /* These don't belong here, they're part of a per-channel context */ |
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | |||
533 | int mthd, uint32_t data) | 537 | int mthd, uint32_t data) |
534 | { | 538 | { |
535 | struct drm_device *dev = chan->dev; | 539 | struct drm_device *dev = chan->dev; |
536 | uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff; | 540 | uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; |
537 | int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; | 541 | int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; |
538 | uint32_t tmp; | 542 | uint32_t tmp; |
539 | 543 | ||
@@ -547,7 +551,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | |||
547 | return 0; | 551 | return 0; |
548 | } | 552 | } |
549 | 553 | ||
550 | static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = { | 554 | static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { |
551 | { 0x0150, nv04_graph_mthd_set_ref }, | 555 | { 0x0150, nv04_graph_mthd_set_ref }, |
552 | {} | 556 | {} |
553 | }; | 557 | }; |
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { | |||
558 | }; | 562 | }; |
559 | 563 | ||
560 | struct nouveau_pgraph_object_class nv04_graph_grclass[] = { | 564 | struct nouveau_pgraph_object_class nv04_graph_grclass[] = { |
561 | { 0x0039, false, nv04_graph_mthds_m2mf }, | 565 | { 0x0039, false, NULL }, |
562 | { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ | 566 | { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ |
563 | { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ | 567 | { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ |
564 | { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ | 568 | { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ |
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = { | |||
574 | { 0x0053, false, NULL }, /* surf3d */ | 578 | { 0x0053, false, NULL }, /* surf3d */ |
575 | { 0x0054, false, NULL }, /* tex_tri */ | 579 | { 0x0054, false, NULL }, /* tex_tri */ |
576 | { 0x0055, false, NULL }, /* multitex_tri */ | 580 | { 0x0055, false, NULL }, /* multitex_tri */ |
581 | { 0x506e, true, nv04_graph_mthds_sw }, | ||
577 | {} | 582 | {} |
578 | }; | 583 | }; |
579 | 584 | ||
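Since the saved context is a flat array parallel to nv04_graph_ctx_regs[], the new ctx_reg() helper is just a linear scan over the table; that lets create_context seed NV04_PGRAPH_DEBUG_3 by name instead of by magic index. A compilable model with a toy table:

#include <stddef.h>
#include <stdint.h>

static const uint32_t ctx_regs[] = { 0x0040053c, 0x00400544, 0x00400540 };

static uint32_t *ctx_slot(uint32_t *ctx, uint32_t reg)
{
	size_t i;

	for (i = 0; i < sizeof(ctx_regs) / sizeof(ctx_regs[0]); i++)
		if (ctx_regs[i] == reg)
			return &ctx[i]; /* slot i mirrors register i */
	return NULL;
}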
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index 79e2d104d70a..cc5cda44e501 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c | |||
@@ -3,17 +3,37 @@ | |||
3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | void | ||
7 | nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
8 | uint32_t size, uint32_t pitch) | ||
9 | { | ||
10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
11 | uint32_t limit = max(1u, addr + size) - 1; | ||
12 | |||
13 | if (pitch) { | ||
14 | if (dev_priv->card_type >= NV_20) | ||
15 | addr |= 1; | ||
16 | else | ||
17 | addr |= 1 << 31; | ||
18 | } | ||
19 | |||
20 | nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); | ||
21 | nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); | ||
22 | nv_wr32(dev, NV10_PFB_TILE(i), addr); | ||
23 | } | ||
24 | |||
6 | int | 25 | int |
7 | nv10_fb_init(struct drm_device *dev) | 26 | nv10_fb_init(struct drm_device *dev) |
8 | { | 27 | { |
9 | uint32_t fb_bar_size; | 28 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
29 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
10 | int i; | 30 | int i; |
11 | 31 | ||
12 | fb_bar_size = drm_get_resource_len(dev, 0) - 1; | 32 | pfb->num_tiles = NV10_PFB_TILE__SIZE; |
13 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | 33 | |
14 | nv_wr32(dev, NV10_PFB_TILE(i), 0); | 34 | /* Turn all the tiling regions off. */ |
15 | nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); | 35 | for (i = 0; i < pfb->num_tiles; i++) |
16 | } | 36 | pfb->set_region_tiling(dev, i, 0, 0, 0); |
17 | 37 | ||
18 | return 0; | 38 | return 0; |
19 | } | 39 | } |
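The PFB helper computes the inclusive end address (max(1, addr + size) - 1 guards the degenerate zero-size case) and ors a valid bit into the address when a pitch is given: bit 0 on NV20+, bit 31 on older chips. A standalone restatement; the write order, address last, is kept from the driver, presumably so the region only arms once limit and pitch are set:

#include <stdint.h>

struct tile_group { uint32_t tile, tlimit, tsize; };

static void set_region_tiling(struct tile_group *r, int nv20_plus,
			      uint32_t addr, uint32_t size, uint32_t pitch)
{
	uint32_t end = addr + size;
	uint32_t limit = (end > 1 ? end : 1) - 1; /* inclusive end address */

	if (pitch)
		addr |= nv20_plus ? 1u : 1u << 31; /* region valid bit */

	r->tlimit = limit;
	r->tsize  = pitch;
	r->tile   = addr;
}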
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 6870e0ee2e7e..fcf2cdd19493 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
@@ -807,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) | |||
807 | chan->pgraph_ctx = NULL; | 807 | chan->pgraph_ctx = NULL; |
808 | } | 808 | } |
809 | 809 | ||
810 | void | ||
811 | nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
812 | uint32_t size, uint32_t pitch) | ||
813 | { | ||
814 | uint32_t limit = max(1u, addr + size) - 1; | ||
815 | |||
816 | if (pitch) | ||
817 | addr |= 1 << 31; | ||
818 | |||
819 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); | ||
820 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); | ||
821 | nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); | ||
822 | } | ||
823 | |||
810 | int nv10_graph_init(struct drm_device *dev) | 824 | int nv10_graph_init(struct drm_device *dev) |
811 | { | 825 | { |
812 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 826 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -838,17 +852,9 @@ int nv10_graph_init(struct drm_device *dev) | |||
838 | } else | 852 | } else |
839 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); | 853 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); |
840 | 854 | ||
841 | /* copy tile info from PFB */ | 855 | /* Turn all the tiling regions off. */ |
842 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | 856 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
843 | nv_wr32(dev, NV10_PGRAPH_TILE(i), | 857 | nv10_graph_set_region_tiling(dev, i, 0, 0, 0); |
844 | nv_rd32(dev, NV10_PFB_TILE(i))); | ||
845 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), | ||
846 | nv_rd32(dev, NV10_PFB_TLIMIT(i))); | ||
847 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), | ||
848 | nv_rd32(dev, NV10_PFB_TSIZE(i))); | ||
849 | nv_wr32(dev, NV10_PGRAPH_TSTATUS(i), | ||
850 | nv_rd32(dev, NV10_PFB_TSTATUS(i))); | ||
851 | } | ||
852 | 858 | ||
853 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); | 859 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); |
854 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); | 860 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 81c01353a9f9..58b917c3341b 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
@@ -33,13 +33,103 @@ | |||
33 | #include "nouveau_hw.h" | 33 | #include "nouveau_hw.h" |
34 | #include "nv17_tv.h" | 34 | #include "nv17_tv.h" |
35 | 35 | ||
36 | enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | 36 | static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) |
37 | struct drm_connector *connector, | ||
38 | uint32_t pin_mask) | ||
39 | { | 37 | { |
38 | struct drm_device *dev = encoder->dev; | ||
39 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
40 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); | ||
41 | uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, | ||
42 | fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; | ||
43 | uint32_t sample = 0; | ||
44 | int head; | ||
45 | |||
46 | #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) | ||
47 | testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); | ||
48 | if (dev_priv->vbios->tvdactestval) | ||
49 | testval = dev_priv->vbios->tvdactestval; | ||
50 | |||
51 | dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); | ||
52 | head = (dacclk & 0x100) >> 8; | ||
53 | |||
54 | /* Save the previous state. */ | ||
55 | gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1); | ||
56 | gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0); | ||
57 | fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); | ||
58 | fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); | ||
59 | fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); | ||
60 | fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); | ||
61 | test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); | ||
62 | ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); | ||
63 | ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); | ||
64 | ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); | ||
65 | |||
66 | /* Prepare the DAC for load detection. */ | ||
67 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true); | ||
68 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true); | ||
69 | |||
70 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); | ||
71 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); | ||
72 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); | ||
73 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, | ||
74 | NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | | ||
75 | NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 | | ||
76 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | | ||
77 | NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | | ||
78 | NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS); | ||
79 | |||
80 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0); | ||
81 | |||
82 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, | ||
83 | (dacclk & ~0xff) | 0x22); | ||
84 | msleep(1); | ||
85 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, | ||
86 | (dacclk & ~0xff) | 0x21); | ||
87 | |||
88 | NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); | ||
89 | NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); | ||
90 | |||
91 | /* Sample pin 0x4 (usually S-video luma). */ | ||
92 | NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); | ||
93 | msleep(20); | ||
94 | sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) | ||
95 | & 0x4 << 28; | ||
96 | |||
97 | /* Sample the remaining pins. */ | ||
98 | NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); | ||
99 | msleep(20); | ||
100 | sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) | ||
101 | & 0xa << 28; | ||
102 | |||
103 | /* Restore the previous state. */ | ||
104 | NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); | ||
105 | NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); | ||
106 | NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); | ||
107 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk); | ||
108 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl); | ||
109 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); | ||
110 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); | ||
111 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); | ||
112 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); | ||
113 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1); | ||
114 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0); | ||
115 | |||
116 | return sample; | ||
117 | } | ||
118 | |||
119 | static enum drm_connector_status | ||
120 | nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
121 | { | ||
122 | struct drm_device *dev = encoder->dev; | ||
123 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
124 | struct drm_mode_config *conf = &dev->mode_config; | ||
40 | struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); | 125 | struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); |
126 | struct dcb_entry *dcb = tv_enc->base.dcb; | ||
41 | 127 | ||
42 | tv_enc->pin_mask = pin_mask >> 28 & 0xe; | 128 | if (dev_priv->chipset == 0x42 || |
129 | dev_priv->chipset == 0x43) | ||
130 | tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; | ||
131 | else | ||
132 | tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; | ||
43 | 133 | ||
44 | switch (tv_enc->pin_mask) { | 134 | switch (tv_enc->pin_mask) { |
45 | case 0x2: | 135 | case 0x2: |
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | |||
50 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; | 140 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; |
51 | break; | 141 | break; |
52 | case 0xe: | 142 | case 0xe: |
53 | if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output) | 143 | if (dcb->tvconf.has_component_output) |
54 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; | 144 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; |
55 | else | 145 | else |
56 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; | 146 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; |
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | |||
61 | } | 151 | } |
62 | 152 | ||
63 | drm_connector_property_set_value(connector, | 153 | drm_connector_property_set_value(connector, |
64 | encoder->dev->mode_config.tv_subconnector_property, | 154 | conf->tv_subconnector_property, |
65 | tv_enc->subconnector); | 155 | tv_enc->subconnector); |
66 | 156 | ||
67 | return tv_enc->subconnector ? connector_status_connected : | 157 | if (tv_enc->subconnector) { |
68 | connector_status_disconnected; | 158 | NV_INFO(dev, "Load detected on output %c\n", |
159 | '@' + ffs(dcb->or)); | ||
160 | return connector_status_connected; | ||
161 | } else { | ||
162 | return connector_status_disconnected; | ||
163 | } | ||
69 | } | 164 | } |
70 | 165 | ||
71 | static const struct { | 166 | static const struct { |
@@ -633,7 +728,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { | |||
633 | .prepare = nv17_tv_prepare, | 728 | .prepare = nv17_tv_prepare, |
634 | .commit = nv17_tv_commit, | 729 | .commit = nv17_tv_commit, |
635 | .mode_set = nv17_tv_mode_set, | 730 | .mode_set = nv17_tv_mode_set, |
636 | .detect = nv17_dac_detect, | 731 | .detect = nv17_tv_detect, |
637 | }; | 732 | }; |
638 | 733 | ||
639 | static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { | 734 | static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { |
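nv42_tv_sample_load() returns the same kind of TEST_CONTROL sample as the DAC path, so both feed a pin mask taken from the top nibble (>> 28 & 0xe). A simplified decode is sketched below; the partial-load mapping is illustrative, the driver's switch distinguishes the exact masks, but the 0xe case (all pins loaded, component vs. SCART by DCB flag) matches the code above:

#include <stdint.h>

enum subconn { NONE, COMPOSITE, SVIDEO, COMPONENT, SCART };

static enum subconn decode_pins(uint32_t sample, int has_component)
{
	unsigned int pins = sample >> 28 & 0xe;

	switch (pins) {
	case 0xe: /* all three DACs loaded */
		return has_component ? COMPONENT : SCART;
	case 0x0:
		return NONE;
	default:  /* partial loads: illustrative composite/S-video split */
		return pins & 0x4 ? SVIDEO : COMPOSITE;
	}
}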
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 18ba74f19703..d6fc0a82f03d 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
 	nouveau_wait_for_idle(dev);
 }
 
+void
+nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			     uint32_t size, uint32_t pitch)
+{
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1;
+
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+}
+
 int
 nv20_graph_init(struct drm_device *dev)
 {
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
 		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
 	}
 
-	/* copy tile info from PFB */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		nv_wr32(dev, 0x00400904 + i * 0x10,
-			nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-		/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-			nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-		nv_wr32(dev, 0x00400908 + i * 0x10,
-			nv_rd32(dev, NV10_PFB_TSIZE(i)));
-		/* which is NV40_PGRAPH_TSIZE0(i) ?? */
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-			nv_rd32(dev, NV10_PFB_TSIZE(i)));
-		nv_wr32(dev, 0x00400900 + i * 0x10,
-			nv_rd32(dev, NV10_PFB_TILE(i)));
-		/* which is NV40_PGRAPH_TILE0(i) ?? */
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-			nv_rd32(dev, NV10_PFB_TILE(i)));
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+
 	for (i = 0; i < 8; i++) {
 		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
 		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)
 
 	nv_wr32(dev, 0x4000c0, 0x00000016);
 
-	/* copy tile info from PFB */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		nv_wr32(dev, 0x00400904 + i * 0x10,
-			nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-		/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
-		nv_wr32(dev, 0x00400908 + i * 0x10,
-			nv_rd32(dev, NV10_PFB_TSIZE(i)));
-		/* which is NV40_PGRAPH_TSIZE0(i) ?? */
-		nv_wr32(dev, 0x00400900 + i * 0x10,
-			nv_rd32(dev, NV10_PFB_TILE(i)));
-		/* which is NV40_PGRAPH_TILE0(i) ?? */
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
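The nv20/nv30 change above replaces the init-time "copy tile info from PFB" loops with a single nv20_graph_set_region_tiling() helper that programs one region at a time; init now simply switches every region off. The value computation is easy to exercise in isolation. Below is a minimal standalone sketch with nv_wr32() mocked out; the 0x004009xx PGRAPH offsets come from the deleted code, and reading the low address bit as a region-enable flag is an assumption, not something the patch states.

#include <stdint.h>
#include <stdio.h>

#define NV_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Mocked register write so the helper can run in userspace. */
static void nv_wr32(uint32_t reg, uint32_t val)
{
	printf("wr32 0x%08x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void set_region_tiling(int i, uint32_t addr, uint32_t size,
			      uint32_t pitch)
{
	/* max(1u, ...) keeps addr == size == 0 from underflowing the
	 * limit to 0xffffffff. */
	uint32_t limit = NV_MAX(1u, addr + size) - 1;

	if (pitch)
		addr |= 1;	/* assumption: low bit enables the region */

	nv_wr32(0x00400904 + i * 0x10, limit);	/* PGRAPH TLIMIT */
	nv_wr32(0x00400908 + i * 0x10, pitch);	/* PGRAPH TSIZE  */
	nv_wr32(0x00400900 + i * 0x10, addr);	/* PGRAPH TILE   */
}

int main(void)
{
	set_region_tiling(0, 0, 0, 0);	/* region off: writes 0, 0, 0 */
	return 0;
}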
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index ca1d27107a8e..3cd07d8d5bd7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -3,12 +3,37 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			  uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1;
+
+	switch (dev_priv->chipset) {
+	case 0x40:
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		break;
+
+	default:
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		break;
+	}
+}
+
 int
 nv40_fb_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t fb_bar_size, tmp;
-	int num_tiles;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	uint32_t tmp;
 	int i;
 
 	/* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
 	case 0x45:
 		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
 		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
-		num_tiles = NV10_PFB_TILE__SIZE;
+		pfb->num_tiles = NV10_PFB_TILE__SIZE;
 		break;
 	case 0x46: /* G72 */
 	case 0x47: /* G70 */
 	case 0x49: /* G71 */
 	case 0x4b: /* G73 */
 	case 0x4c: /* C51 (G7X version) */
-		num_tiles = NV40_PFB_TILE__SIZE_1;
+		pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
 		break;
 	default:
-		num_tiles = NV40_PFB_TILE__SIZE_0;
+		pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
 		break;
 	}
 
-	fb_bar_size = drm_get_resource_len(dev, 0) - 1;
-	switch (dev_priv->chipset) {
-	case 0x40:
-		for (i = 0; i < num_tiles; i++) {
-			nv_wr32(dev, NV10_PFB_TILE(i), 0);
-			nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
-		}
-		break;
-	default:
-		for (i = 0; i < num_tiles; i++) {
-			nv_wr32(dev, NV40_PFB_TILE(i), 0);
-			nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
-		}
-		break;
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->set_region_tiling(dev, i, 0, 0, 0);
 
 	return 0;
 }
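On the PFB side the same idea lands as state on the fb engine: nv40_fb_init() now records num_tiles per chipset and turns every region off through the pfb->set_region_tiling hook, instead of open-coding per-chipset writes that pointed TLIMIT at the whole fb BAR. A sketch of that hook-based dispatch follows, assuming a simplified engine struct and placeholder tile counts (the driver uses NV10_PFB_TILE__SIZE and the NV40_PFB_TILE__SIZE_* constants).

#include <stdint.h>
#include <stdio.h>

#define TILE_COUNT_NV10 8	/* placeholder for NV10_PFB_TILE__SIZE   */
#define TILE_COUNT_NV40 15	/* placeholder for NV40_PFB_TILE__SIZE_* */

/* Simplified stand-in for struct nouveau_fb_engine. */
struct fb_engine {
	int num_tiles;
	void (*set_region_tiling)(int i, uint32_t addr, uint32_t size,
				  uint32_t pitch);
};

static void mock_set_region_tiling(int i, uint32_t addr, uint32_t size,
				   uint32_t pitch)
{
	printf("region %d: addr=0x%08x size=0x%08x pitch=0x%08x\n",
	       i, (unsigned)addr, (unsigned)size, (unsigned)pitch);
}

static int fb_init(struct fb_engine *pfb, int chipset)
{
	int i;

	/* The chipset switch picks the register bank size, as in
	 * nv40_fb_init(); only the 0x40 case is modelled here. */
	pfb->num_tiles = (chipset == 0x40) ? TILE_COUNT_NV10 : TILE_COUNT_NV40;
	pfb->set_region_tiling = mock_set_region_tiling;

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_region_tiling(i, 0, 0, 0);

	return 0;
}

int main(void)
{
	struct fb_engine pfb;
	return fb_init(&pfb, 0x40);
}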
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 2b332bb55acf..53e8afe1dcd1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -181,6 +181,48 @@ nv40_graph_unload_context(struct drm_device *dev)
 	return ret;
 }
 
+void
+nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			     uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1;
+
+	switch (dev_priv->chipset) {
+	case 0x44:
+	case 0x4a:
+	case 0x4e:
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		break;
+
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		break;
+
+	default:
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		break;
+	}
+}
+
 /*
  * G70	0x47
  * G71	0x49
@@ -195,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv =
 		(struct drm_nouveau_private *)dev->dev_private;
-	uint32_t vramsz, tmp;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	uint32_t vramsz;
 	int i, j;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -292,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
 	nv_wr32(dev, 0x400b38, 0x2ffff800);
 	nv_wr32(dev, 0x400b3c, 0x00006000);
 
-	/* copy tile info from PFB */
-	switch (dev_priv->chipset) {
-	case 0x40: /* vanilla NV40 */
-		for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-			tmp = nv_rd32(dev, NV10_PFB_TILE(i));
-			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-			tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-			tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
-			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-			tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-		}
-		break;
-	case 0x44:
-	case 0x4a:
-	case 0x4e: /* NV44-based cores don't have 0x406900? */
-		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
-			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-		}
-		break;
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b: /* G7X-based cores */
-		for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
-			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-			nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-			nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-			nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-			nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-		}
-		break;
-	default: /* everything else */
-		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
-			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-		}
-		break;
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->num_tiles; i++)
+		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
 
 	/* begin RAM config */
 	vramsz = drm_get_resource_len(dev, 0) - 1;
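nv40_graph_set_region_tiling() keeps the same chipset split the deleted init-time loops used: NV44-class cores (0x44/0x4a/0x4e) get only the first PGRAPH bank at NV20-style offsets, G7x cores (0x46/0x47/0x49/0x4b) use NV47 offsets plus the second bank, and everything else writes NV20-style offsets plus the second bank. A small sketch of just that classification (the enum names are mine, chosen to describe which banks get written):

#include <stdio.h>

enum tile_banks {
	BANK_NV20_ONLY,	/* 0x44/0x4a/0x4e: single bank, NV20-style offsets */
	BANK_NV47_PAIR,	/* 0x46/0x47/0x49/0x4b: NV47 offsets + second bank */
	BANK_NV20_PAIR,	/* everything else: NV20 offsets + second bank */
};

static enum tile_banks classify(int chipset)
{
	switch (chipset) {
	case 0x44: case 0x4a: case 0x4e:
		return BANK_NV20_ONLY;
	case 0x46: case 0x47: case 0x49: case 0x4b:
		return BANK_NV47_PAIR;
	default:
		return BANK_NV20_PAIR;
	}
}

int main(void)
{
	printf("0x47 -> %d, 0x44 -> %d, 0x40 -> %d\n",
	       classify(0x47), classify(0x44), classify(0x40));
	return 0;
}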
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a9263d92a231..90f0bf59fbcd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
 			   int pxclk)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_connector *nv_connector = NULL;
+	struct drm_encoder *encoder;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	uint32_t mc, script = 0, or;
 
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (nv_encoder->dcb != dcbent)
+			continue;
+
+		nv_connector = nouveau_encoder_connector_get(nv_encoder);
+		break;
+	}
+
 	or = ffs(dcbent->or) - 1;
 	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
 	switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
 		} else
 		if (bios->fp.strapless_is_24bit & 1)
 			script |= 0x0200;
+
+		if (nv_connector && nv_connector->edid &&
+		    (nv_connector->edid->revision >= 4) &&
+		    (nv_connector->edid->input & 0x70) >= 0x20)
+			script |= 0x0200;
 	}
 
 	if (nouveau_uscript_lvds >= 0) {
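The nv50_display.c hunks tie the LVDS script choice to the panel's EDID: the encoder list is walked to find the connector driving this DCB entry, and if its EDID is revision 4 or newer with (input & 0x70) >= 0x20, the 24-bit script flag (0x0200) is set in addition to the existing strap-based checks. In EDID 1.4 the video-input byte's bits 6:4 encode colour depth, with 0x20 meaning 8 bits per component, which is presumably what this test keys on. A sketch of the predicate, assuming a stripped-down EDID struct:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct edid_stub {
	uint8_t revision;	/* offset 0x13 in a real EDID block */
	uint8_t input;		/* offset 0x14, video input definition */
};

/* True when the panel advertises 8+ bits per component (24-bit). */
static bool lvds_is_24bit(const struct edid_stub *edid)
{
	return edid && edid->revision >= 4 && (edid->input & 0x70) >= 0x20;
}

int main(void)
{
	/* 0xa0 = digital input (bit 7) + 8 bpc (bits 6:4 == 010) */
	struct edid_stub e = { .revision = 4, .input = 0xa0 };

	printf("24-bit: %d\n", lvds_is_24bit(&e));
	return 0;
}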
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bcc6d39e9b0..e4f279ee61cf 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 
 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
 	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}
 
 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		OUT_RING(chan, 1);
 	}
 	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
-	OUT_RING(chan, rect->color);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING(chan, rect->color);
 	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
 	OUT_RING(chan, rect->dx);
 	OUT_RING(chan, rect->dy);
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 		return;
 
 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}
 
 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	}
 
 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}
 
 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		int push = dwords > 2047 ? 2047 : dwords;
 
 		if (RING_SPACE(chan, push + 1)) {
-			NV_ERROR(dev,
-				 "GPU lockup - switching to software fbcon\n");
-			info->flags |= FBINFO_HWACCEL_DISABLED;
+			nouveau_fbcon_gpu_lockup(info);
 			cfb_imageblit(info, image);
 			return;
 		}
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 
 	ret = RING_SPACE(chan, 59);
 	if (ret) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		nouveau_fbcon_gpu_lockup(info);
 		return ret;
 	}
 
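Two separate fixes land in nv50_fbcon.c. Every "GPU lockup" site now calls the shared nouveau_fbcon_gpu_lockup() helper (introduced elsewhere in this series) instead of open-coding the error message and the FBINFO_HWACCEL_DISABLED flag. And fillrect now resolves the fill colour correctly outside 8bpp: with truecolor/directcolor visuals fbcon passes a palette index, so the pixel value has to come from info->pseudo_palette. A minimal sketch of that lookup, with the visual constants taken from linux/fb.h:

#include <stdint.h>
#include <stdio.h>

#define FB_VISUAL_TRUECOLOR	2	/* values as in linux/fb.h */
#define FB_VISUAL_DIRECTCOLOR	4

static uint32_t fill_pixel(int visual, const uint32_t *pseudo_palette,
			   uint32_t color)
{
	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR)
		return pseudo_palette[color];	/* colour is a palette index */
	return color;				/* 8bpp: colour is the pixel */
}

int main(void)
{
	/* fbcon's software palette has 16 entries; index 1 is mapped here. */
	uint32_t palette[16] = { [1] = 0x00aa0000 };

	printf("0x%08x\n", (unsigned)fill_pixel(FB_VISUAL_TRUECOLOR, palette, 1));
	return 0;
}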
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index b7282284f080..39caf167587d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -384,8 +384,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
 			nv_ro32(dev, cache, (ptr * 2) + 1));
 	}
-	nv_wr32(dev, 0x3210, cnt << 2);
-	nv_wr32(dev, 0x3270, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 
 	/* guessing that all the 0x34xx regs aren't on NV50 */
 	if (!IS_G80) {
@@ -398,8 +398,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 
 	dev_priv->engine.instmem.finish_access(dev);
 
-	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
 	return 0;
 }
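The nv50_fifo.c change fixes the fifoctx restore path: after reloading cnt CACHE1 entries, PUT now points past them (cnt << 2 converts the entry index to a byte offset) and GET at 0, written through the symbolic NV03_PFIFO_CACHE1_* names rather than raw 0x3210/0x3270, and the later writes that zeroed both registers are dropped, since rezeroing PUT made the just-restored commands look already consumed. A toy illustration of why PUT must stay at cnt << 2; the "one command word per slot" framing is a simplification:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cnt = 12;		/* entries restored from the fifoctx */
	uint32_t put = cnt << 2;	/* byte offset of the first free slot */
	uint32_t get = 0;		/* the puller restarts at the start */

	/* With put == get == 0, as the old code left it, this would be 0
	 * and the restored commands would never be replayed. */
	printf("pending words: %u\n", (unsigned)((put - get) >> 2));
	return 0;
}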