Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/cirrus/cirrus_drv.c	|  19
-rw-r--r--	drivers/gpu/drm/cirrus/cirrus_drv.h	|   2
-rw-r--r--	drivers/gpu/drm/cirrus/cirrus_ttm.c	|   5
-rw-r--r--	drivers/gpu/drm/drm_edid.c	|  33
-rw-r--r--	drivers/gpu/drm/i810/i810_dma.c	|   4
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	|   3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_dmabuf.c	|  61
-rw-r--r--	drivers/gpu/drm/mgag200/mgag200_drv.c	|  19
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h	|   3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_prime.c	|  45
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	| 382
-rw-r--r--	drivers/gpu/drm/radeon/evergreend.h	|  11
-rw-r--r--	drivers/gpu/drm/radeon/ni.c	| 381
-rw-r--r--	drivers/gpu/drm/radeon/nid.h	|  11
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	| 214
-rw-r--r--	drivers/gpu/drm/radeon/r600_audio.c	|   5
-rw-r--r--	drivers/gpu/drm/radeon/r600_hdmi.c	|   1
-rw-r--r--	drivers/gpu/drm/radeon/r600d.h	|   2
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	|  13
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	|  31
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gart.c	|  19
-rw-r--r--	drivers/gpu/drm/radeon/radeon_kms.c	|   2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_prime.c	|  44
-rw-r--r--	drivers/gpu/drm/radeon/rs600.c	|  12
-rw-r--r--	drivers/gpu/drm/radeon/rs690.c	|  12
-rw-r--r--	drivers/gpu/drm/radeon/rv770.c	| 292
-rw-r--r--	drivers/gpu/drm/radeon/rv770d.h	|   4
-rw-r--r--	drivers/gpu/drm/radeon/si.c	| 477
-rw-r--r--	drivers/gpu/drm/radeon/sid.h	|  19
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	|   1
-rw-r--r--	drivers/gpu/drm/udl/udl_fb.c	|  13
-rw-r--r--	drivers/gpu/drm/udl/udl_gem.c	|  25
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c	|   2
33 files changed, 772 insertions(+), 1395 deletions(-)
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d7038230b71e..7053140c6596 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 	{0,}
 };
 
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+	kfree(ap);
+}
+
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	cirrus_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8836f7..64ea597cb6d3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@ struct cirrus_device {
 	struct ttm_bo_device bdev;
 	atomic_t validate_sequence;
 	} ttm;
-
+	bool mm_inited;
 };
 
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11a5023..50e170f879de 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 				       pci_resource_len(dev->pdev, 0),
 				       DRM_MTRR_WC);
 
+	cirrus->mm_inited = true;
 	return 0;
 }
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
 	struct drm_device *dev = cirrus->dev;
+
+	if (!cirrus->mm_inited)
+		return;
+
 	ttm_bo_device_release(&cirrus->ttm.bdev);
 
 	cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 608bddfc7e35..eb92fe257a39 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -66,6 +66,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -120,6 +122,9 @@ static struct edid_quirk {
 	/* Samsung SyncMaster 22[5-6]BW */
 	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+	/* ViewSonic VA2026w */
+	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -144,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
 }
 EXPORT_SYMBOL(drm_edid_header_is_valid);
 
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+		 "Minimum number of valid EDID header bytes (0-8, default 6)");
 
 /*
  * Sanity check the EDID block (base or extension). Return 0 if the block
@@ -155,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
 	u8 csum = 0;
 	struct edid *edid = (struct edid *)raw_edid;
 
+	if (edid_fixup > 8 || edid_fixup < 0)
+		edid_fixup = 6;
+
 	if (block == 0) {
 		int score = drm_edid_header_is_valid(raw_edid);
 		if (score == 8) ;
-		else if (score >= 6) {
+		else if (score >= edid_fixup) {
 			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
 			memcpy(raw_edid, edid_header, sizeof(edid_header));
 		} else {
@@ -885,12 +897,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
885 "Wrong Hsync/Vsync pulse width\n"); 897 "Wrong Hsync/Vsync pulse width\n");
886 return NULL; 898 return NULL;
887 } 899 }
900
901 if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
902 mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
903 if (!mode)
904 return NULL;
905
906 goto set_size;
907 }
908
888 mode = drm_mode_create(dev); 909 mode = drm_mode_create(dev);
889 if (!mode) 910 if (!mode)
890 return NULL; 911 return NULL;
891 912
892 mode->type = DRM_MODE_TYPE_DRIVER;
893
894 if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) 913 if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
895 timing->pixel_clock = cpu_to_le16(1088); 914 timing->pixel_clock = cpu_to_le16(1088);
896 915
@@ -914,8 +933,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_do_interlace_quirk(mode, pt);
 
-	drm_mode_set_name(mode);
-
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
 	}
@@ -925,6 +942,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
 		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
 	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
 	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
@@ -938,6 +956,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 		mode->height_mm = edid->height_cm * 10;
 	}
 
+	mode->type = DRM_MODE_TYPE_DRIVER;
+	drm_mode_set_name(mode);
+
 	return mode;
 }
 
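The edid_fixup parameter introduced above makes a previously hard-coded threshold tunable: an EDID base block is accepted, and its header silently rewritten, when at least edid_fixup of the eight header bytes match the fixed pattern. Out-of-range settings fall back to the old default of 6, and drm.edid_fixup=8 effectively disables the repair, since only perfect headers score 8. For reference, a sketch of the scoring this threshold is compared against (the scoring helper itself is untouched by this diff, and the function name below is hypothetical):

	/* The fixed 8-byte pattern every EDID base block must start with. */
	static const u8 edid_header[] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};

	/* Count matching header bytes: 8 is a perfect header, and anything
	 * >= edid_fixup is close enough to repair via memcpy() as above. */
	static int edid_header_score(const u8 *raw_edid)
	{
		int i, score = 0;

		for (i = 0; i < 8; i++)
			if (raw_edid[i] == edid_header[i])
				score++;
		return score;
	}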
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5e42b6..fa9439159ebd 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
 		return -EINVAL;
 
 	/* This is all entirely broken */
-	down_write(&current->mm->mmap_sem);
 	old_fops = file_priv->filp->f_op;
 	file_priv->filp->f_op = &i810_buffer_fops;
 	dev_priv->mmap_buffer = buf;
-	buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+	buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
 					    PROT_READ | PROT_WRITE,
 					    MAP_SHARED, buf->bus_address);
 	dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
 		retcode = PTR_ERR(buf_priv->virtual);
 		buf_priv->virtual = NULL;
 	}
-	up_write(&current->mm->mmap_sem);
 
 	return retcode;
 }
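The i810 hunks are part of the tree-wide do_mmap() cleanup: vm_mmap() takes and drops mmap_sem internally, so the open-coded down_write()/up_write() pair around the call is no longer needed. Schematically, the transformation is (a sketch of the pattern, not text from this diff):

	/* before: the caller had to hold mmap_sem across do_mmap() */
	down_write(&current->mm->mmap_sem);
	addr = do_mmap(filp, 0, len, PROT_READ | PROT_WRITE, MAP_SHARED, off);
	up_write(&current->mm->mmap_sem);

	/* after: vm_mmap() performs the mmap_sem locking itself */
	addr = vm_mmap(filp, 0, len, PROT_READ | PROT_WRITE, MAP_SHARED, off);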
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cff630757650..b0b676abde0d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -943,6 +943,9 @@ struct drm_i915_gem_object {
 
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8e269178d6a5..aa308e1337db 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 	}
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
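The new vmap/vunmap ops give dma-buf importers a refcounted linear kernel mapping of the whole object: the first call pins the backing pages (through i915_gem_object_get_pages_gtt()) and builds the mapping with vmap(), later calls only bump vmapping_count, and the mapping is torn down once the count returns to zero. A minimal importer-side sketch, assuming a struct dma_buf obtained elsewhere; example_cpu_access() is a hypothetical caller, and since this exporter returns ERR_PTR() on failure while the dma-buf core can also return NULL, both cases are checked:

	#include <linux/dma-buf.h>

	static int example_cpu_access(struct dma_buf *buf)
	{
		void *vaddr;

		vaddr = dma_buf_vmap(buf);	/* reaches i915_gem_dmabuf_vmap() */
		if (IS_ERR_OR_NULL(vaddr))
			return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

		/* ... CPU access through the contiguous mapping ... */

		dma_buf_vunmap(buf, vaddr);	/* drops the exporter's refcount */
		return 0;
	}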
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f54713..93e832d6c328 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+	kfree(ap);
+}
+
+
 static int __devinit
 mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	mgag200_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 634d222c93de..8613cb23808c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@ struct nouveau_bo {
 
 	struct drm_gem_object *gem;
 	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };
 
 #define nouveau_bo_tile_layout(nvbo) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index c58aab7370c5..a89240e5fb29 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -61,6 +61,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
 
 }
 
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (nvbo->vmapping_count) {
+		nvbo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+			  &nvbo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	nvbo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nvbo->vmapping_count--;
+	if (nvbo->vmapping_count == 0) {
+		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops = {
 	.map_dma_buf = nouveau_gem_map_dma_buf,
 	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +111,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops = {
 	.kmap_atomic = nouveau_gem_kmap_atomic,
 	.kunmap = nouveau_gem_kunmap,
 	.kunmap_atomic = nouveau_gem_kunmap_atomic,
+	.mmap = nouveau_gem_prime_mmap,
+	.vmap = nouveau_gem_prime_vmap,
+	.vunmap = nouveau_gem_prime_vunmap,
 };
 
 static int
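nouveau implements the same contract on top of TTM: ttm_bo_kmap() maps the whole buffer object once, and vmapping_count, serialized by struct_mutex, makes repeated vmap/vunmap calls cheap. Unlike i915 it keys "already mapped" off the nonzero count rather than a cached pointer, and it takes the mutex uninterruptibly. Both exporters reduce to the same refcounted-cache shape, roughly (an illustrative pattern, not kernel code; build_mapping() stands in for vmap() or ttm_bo_kmap()):

	mutex_lock(&lock);
	if (count++ == 0) {
		vaddr = build_mapping(obj);	/* create the mapping once */
		if (!vaddr) {
			count = 0;
			mutex_unlock(&lock);
			return ERR_PTR(-ENOMEM);
		}
	}
	mutex_unlock(&lock);
	return vaddr;				/* valid while count > 0 */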
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af90502..01550d05e273 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
 		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
 		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+		if ((rdev->family == CHIP_JUNIPER) ||
+		    (rdev->family == CHIP_CYPRESS) ||
+		    (rdev->family == CHIP_HEMLOCK) ||
+		    (rdev->family == CHIP_BARTS))
+			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
 	}
 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-						  u32 num_tile_pipes,
-						  u32 num_backends,
-						  u32 backend_disable_mask)
-{
-	u32 backend_map = 0;
-	u32 enabled_backends_mask = 0;
-	u32 enabled_backends_count = 0;
-	u32 cur_pipe;
-	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
-	u32 cur_backend = 0;
-	u32 i;
-	bool force_no_swizzle;
-
-	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
-		num_tile_pipes = EVERGREEN_MAX_PIPES;
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_backends > EVERGREEN_MAX_BACKENDS)
-		num_backends = EVERGREEN_MAX_BACKENDS;
-	if (num_backends < 1)
-		num_backends = 1;
-
-	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-		if (((backend_disable_mask >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
-		}
-		if (enabled_backends_count == num_backends)
-			break;
-	}
-
-	if (enabled_backends_count == 0) {
-		enabled_backends_mask = 1;
-		enabled_backends_count = 1;
-	}
-
-	if (enabled_backends_count != num_backends)
-		num_backends = enabled_backends_count;
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
-	switch (rdev->family) {
-	case CHIP_CEDAR:
-	case CHIP_REDWOOD:
-	case CHIP_PALM:
-	case CHIP_SUMO:
-	case CHIP_SUMO2:
-	case CHIP_TURKS:
-	case CHIP_CAICOS:
-		force_no_swizzle = false;
-		break;
-	case CHIP_CYPRESS:
-	case CHIP_HEMLOCK:
-	case CHIP_JUNIPER:
-	case CHIP_BARTS:
-	default:
-		force_no_swizzle = true;
-		break;
-	}
-	if (force_no_swizzle) {
-		bool last_backend_enabled = false;
-
-		force_no_swizzle = false;
-		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-			if (((enabled_backends_mask >> i) & 1) == 1) {
-				if (last_backend_enabled)
-					force_no_swizzle = true;
-				last_backend_enabled = true;
-			} else
-				last_backend_enabled = false;
-		}
-	}
-
-	switch (num_tile_pipes) {
-	case 1:
-	case 3:
-	case 5:
-	case 7:
-		DRM_ERROR("odd number of pipes!\n");
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 4:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 1;
-			swizzle_pipe[3] = 3;
-		}
-		break;
-	case 6:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 1;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 5;
-		}
-		break;
-	case 8:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-			swizzle_pipe[7] = 7;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 1;
-			swizzle_pipe[5] = 3;
-			swizzle_pipe[6] = 5;
-			swizzle_pipe[7] = 7;
-		}
-		break;
-	}
-
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
-		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-	}
-
-	return backend_map;
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-	u32 cc_rb_backend_disable = 0;
-	u32 cc_gc_shader_pipe_config;
-	u32 gb_addr_config = 0;
+	u32 gb_addr_config;
 	u32 mc_shared_chmap, mc_arb_ramcfg;
-	u32 gb_backend_map;
-	u32 grbm_gfx_index;
 	u32 sx_debug_1;
 	u32 smx_dc_ctl0;
 	u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	u32 sq_stack_resource_mgmt_3;
 	u32 vgt_cache_invalidation;
 	u32 hdp_host_path_cntl, tmp;
+	u32 disabled_rb_mask;
 	int i, j, num_shader_engines, ps_thread_count;
 
 	switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_JUNIPER:
 		rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_REDWOOD:
 		rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_CEDAR:
 	default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_PALM:
 		rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_SUMO:
 		rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_SUMO2:
 		rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_BARTS:
 		rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_TURKS:
 		rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_CAICOS:
 		rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	}
 
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
-	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
-	cc_gc_shader_pipe_config |=
-		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
-				  & EVERGREEN_MAX_PIPES_MASK);
-	cc_gc_shader_pipe_config |=
-		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
-			       & EVERGREEN_MAX_SIMDS_MASK);
-
-	cc_rb_backend_disable =
-		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
-				& EVERGREEN_MAX_BACKENDS_MASK);
-
-
 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 	if ((rdev->family == CHIP_PALM) ||
 	    (rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	else
 		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-	switch (rdev->config.evergreen.max_tile_pipes) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_PIPES(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_PIPES(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_PIPES(2);
-		break;
-	case 8:
-		gb_addr_config |= NUM_PIPES(3);
-		break;
-	}
-
-	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
-	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
-	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
-	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
-	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
-	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
-		gb_addr_config |= ROW_SIZE(2);
-	else
-		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
-	if (rdev->ddev->pdev->device == 0x689e) {
-		u32 efuse_straps_4;
-		u32 efuse_straps_3;
-		u8 efuse_box_bit_131_124;
-
-		WREG32(RCU_IND_INDEX, 0x204);
-		efuse_straps_4 = RREG32(RCU_IND_DATA);
-		WREG32(RCU_IND_INDEX, 0x203);
-		efuse_straps_3 = RREG32(RCU_IND_DATA);
-		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
-		switch(efuse_box_bit_131_124) {
-		case 0x00:
-			gb_backend_map = 0x76543210;
-			break;
-		case 0x55:
-			gb_backend_map = 0x77553311;
-			break;
-		case 0x56:
-			gb_backend_map = 0x77553300;
-			break;
-		case 0x59:
-			gb_backend_map = 0x77552211;
-			break;
-		case 0x66:
-			gb_backend_map = 0x77443300;
-			break;
-		case 0x99:
-			gb_backend_map = 0x66552211;
-			break;
-		case 0x5a:
-			gb_backend_map = 0x77552200;
-			break;
-		case 0xaa:
-			gb_backend_map = 0x66442200;
-			break;
-		case 0x95:
-			gb_backend_map = 0x66553311;
-			break;
-		default:
-			DRM_ERROR("bad backend map, using default\n");
-			gb_backend_map =
-				evergreen_get_tile_pipe_to_backend_map(rdev,
-								       rdev->config.evergreen.max_tile_pipes,
-								       rdev->config.evergreen.max_backends,
-								       ((EVERGREEN_MAX_BACKENDS_MASK <<
-								       rdev->config.evergreen.max_backends) &
-								       EVERGREEN_MAX_BACKENDS_MASK));
-			break;
-		}
-	} else if (rdev->ddev->pdev->device == 0x68b9) {
-		u32 efuse_straps_3;
-		u8 efuse_box_bit_127_124;
-
-		WREG32(RCU_IND_INDEX, 0x203);
-		efuse_straps_3 = RREG32(RCU_IND_DATA);
-		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
-		switch(efuse_box_bit_127_124) {
-		case 0x0:
-			gb_backend_map = 0x00003210;
-			break;
-		case 0x5:
-		case 0x6:
-		case 0x9:
-		case 0xa:
-			gb_backend_map = 0x00003311;
-			break;
-		default:
-			DRM_ERROR("bad backend map, using default\n");
-			gb_backend_map =
-				evergreen_get_tile_pipe_to_backend_map(rdev,
-								       rdev->config.evergreen.max_tile_pipes,
-								       rdev->config.evergreen.max_backends,
-								       ((EVERGREEN_MAX_BACKENDS_MASK <<
-								       rdev->config.evergreen.max_backends) &
-								       EVERGREEN_MAX_BACKENDS_MASK));
-			break;
-		}
-	} else {
-		switch (rdev->family) {
-		case CHIP_CYPRESS:
-		case CHIP_HEMLOCK:
-		case CHIP_BARTS:
-			gb_backend_map = 0x66442200;
-			break;
-		case CHIP_JUNIPER:
-			gb_backend_map = 0x00002200;
-			break;
-		default:
-			gb_backend_map =
-				evergreen_get_tile_pipe_to_backend_map(rdev,
-								       rdev->config.evergreen.max_tile_pipes,
-								       rdev->config.evergreen.max_backends,
-								       ((EVERGREEN_MAX_BACKENDS_MASK <<
-								       rdev->config.evergreen.max_backends) &
-								       EVERGREEN_MAX_BACKENDS_MASK));
-		}
-	}
-
 	/* setup tiling info dword. gb_addr_config is not adequate since it does
 	 * not have bank info, so create a custom tiling dword.
 	 * bits 3:0 num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->config.evergreen.tile_config |= 1 << 4;
-	else
-		rdev->config.evergreen.tile_config |=
-			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
-	rdev->config.evergreen.tile_config |=
-		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			rdev->config.evergreen.tile_config |= 1 << 4;
+		else
+			rdev->config.evergreen.tile_config |= 0 << 4;
+	}
+	rdev->config.evergreen.tile_config |= 0 << 8;
 	rdev->config.evergreen.tile_config |=
 		((gb_addr_config & 0x30000000) >> 28) << 12;
 
-	rdev->config.evergreen.backend_map = gb_backend_map;
-	WREG32(GB_BACKEND_MAP, gb_backend_map);
-	WREG32(GB_ADDR_CONFIG, gb_addr_config);
-	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
-	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
-
-	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
-	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+	num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
 
-	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
-		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
-		u32 sp = cc_gc_shader_pipe_config;
-		u32 gfx = grbm_gfx_index | SE_INDEX(i);
+	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
 
-		if (i == num_shader_engines) {
-			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
-			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+		WREG32(RCU_IND_INDEX, 0x204);
+		efuse_straps_4 = RREG32(RCU_IND_DATA);
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		tmp = (((efuse_straps_4 & 0xf) << 4) |
+		      ((efuse_straps_3 & 0xf0000000) >> 28));
+	} else {
+		tmp = 0;
+		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+			u32 rb_disable_bitmap;
+
+			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+			tmp <<= 4;
+			tmp |= rb_disable_bitmap;
 		}
+	}
+	/* enabled rb are just the one not disabled :) */
+	disabled_rb_mask = tmp;
 
-		WREG32(GRBM_GFX_INDEX, gfx);
-		WREG32(RLC_GFX_INDEX, gfx);
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
-		WREG32(CC_RB_BACKEND_DISABLE, rb);
-		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
-		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
-		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
-	}
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-	grbm_gfx_index |= SE_BROADCAST_WRITES;
-	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
-	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+	tmp = gb_addr_config & NUM_PIPES_MASK;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+					EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	WREG32(GB_BACKEND_MAP, tmp);
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_TCC_DISABLE, 0);
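The rewritten evergreen_gpu_init() drops the hand-maintained per-device backend maps: it programs a known-good "golden" GB_ADDR_CONFIG per family, harvests which render backends are fused off (from efuse straps on Cedar through Hemlock, otherwise by reading CC_RB_BACKEND_DISABLE bits 23:16 for each shader engine and packing them four bits at a time into disabled_rb_mask), and hands the result to the shared r6xx_remap_render_backend() helper. Conceptually the helper does what the deleted evergreen_get_tile_pipe_to_backend_map() loop did, assigning each tile pipe the next enabled backend, as in this simplified sketch (illustrative only, not the kernel helper's exact code):

	/* Pack one 4-bit backend index per tile pipe into a
	 * GB_BACKEND_MAP-style word, skipping fused-off backends. */
	static u32 remap_backends_sketch(u32 num_pipes, u32 total_rb,
					 u32 disabled_rb_mask)
	{
		u32 map = 0, rb = 0, pipe;

		for (pipe = 0; pipe < num_pipes; pipe++) {
			while (disabled_rb_mask & (1 << rb))
				rb = (rb + 1) % total_rb;
			map |= (rb & 0xf) << (pipe * 4);
			rb = (rb + 1) % total_rb;
		}
		return map;
	}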
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bfd1d6f..2773039b4902 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
 #define EVERGREEN_MAX_PIPES_MASK		0xFF
 #define EVERGREEN_MAX_LDS_NUM			0xFFFF
 
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN		0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN		0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN		0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN		0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN		0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN		0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN		0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN		0x02010001
+
 /* Registers */
 
 #define RCU_IND_INDEX					0x100
@@ -54,6 +63,7 @@
 #define		BACKEND_DISABLE(x)			((x) << 16)
 #define	GB_ADDR_CONFIG					0x98F8
 #define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x0000000f
 #define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
 #define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
 #define		NUM_SHADER_ENGINES(x)			((x) << 12)
@@ -452,6 +462,7 @@
 #define	MC_VM_MD_L1_TLB0_CNTL				0x2654
 #define	MC_VM_MD_L1_TLB1_CNTL				0x2658
 #define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
 
 #define	FUS_MC_VM_MD_L1_TLB0_CNTL			0x265C
 #define	FUS_MC_VM_MD_L1_TLB1_CNTL			0x2660
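The golden values are consistent with the GB_ADDR_CONFIG field layout above, with NUM_PIPES in bits 3:0 (decoded as 1 << n tile pipes by evergreen.c and ni.c) and NUM_SHADER_ENGINES starting at bit 12. A worked decode of the three distinct constants, using only the masks defined in this header:

	/* 0x02011003: NUM_PIPES = 0x02011003 & 0xf = 3 -> 1 << 3 = 8 pipes;
	 *             NUM_SHADER_ENGINES = (0x02011003 >> 12) & 3 = 1 -> 2 engines
	 * 0x02010002: NUM_PIPES = 2 -> 4 pipes; (>> 12) & 3 = 0 -> 1 engine
	 * 0x02010001: NUM_PIPES = 1 -> 2 pipes; (>> 12) & 3 = 0 -> 1 engine
	 */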
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ce4e7cc6c905..3186522a4458 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out:
 /*
  * Core functions
  */
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-					       u32 num_tile_pipes,
-					       u32 num_backends_per_asic,
-					       u32 *backend_disable_mask_per_asic,
-					       u32 num_shader_engines)
-{
-	u32 backend_map = 0;
-	u32 enabled_backends_mask = 0;
-	u32 enabled_backends_count = 0;
-	u32 num_backends_per_se;
-	u32 cur_pipe;
-	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
-	u32 cur_backend = 0;
-	u32 i;
-	bool force_no_swizzle;
-
-	/* force legal values */
-	if (num_tile_pipes < 1)
-		num_tile_pipes = 1;
-	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
-		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-	if (num_shader_engines < 1)
-		num_shader_engines = 1;
-	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
-		num_shader_engines = rdev->config.cayman.max_shader_engines;
-	if (num_backends_per_asic < num_shader_engines)
-		num_backends_per_asic = num_shader_engines;
-	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
-		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
-	/* make sure we have the same number of backends per se */
-	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-	/* set up the number of backends per se */
-	num_backends_per_se = num_backends_per_asic / num_shader_engines;
-	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
-		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
-		num_backends_per_asic = num_backends_per_se * num_shader_engines;
-	}
-
-	/* create enable mask and count for enabled backends */
-	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-			enabled_backends_mask |= (1 << i);
-			++enabled_backends_count;
-		}
-		if (enabled_backends_count == num_backends_per_asic)
-			break;
-	}
-
-	/* force the backends mask to match the current number of backends */
-	if (enabled_backends_count != num_backends_per_asic) {
-		u32 this_backend_enabled;
-		u32 shader_engine;
-		u32 backend_per_se;
-
-		enabled_backends_mask = 0;
-		enabled_backends_count = 0;
-		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
-		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-			/* calc the current se */
-			shader_engine = i / rdev->config.cayman.max_backends_per_se;
-			/* calc the backend per se */
-			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
-			/* default to not enabled */
-			this_backend_enabled = 0;
-			if ((shader_engine < num_shader_engines) &&
-			    (backend_per_se < num_backends_per_se))
-				this_backend_enabled = 1;
-			if (this_backend_enabled) {
-				enabled_backends_mask |= (1 << i);
-				*backend_disable_mask_per_asic &= ~(1 << i);
-				++enabled_backends_count;
-			}
-		}
-	}
-
-
-	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
-	switch (rdev->family) {
-	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
-		force_no_swizzle = true;
-		break;
-	default:
-		force_no_swizzle = false;
-		break;
-	}
-	if (force_no_swizzle) {
-		bool last_backend_enabled = false;
-
-		force_no_swizzle = false;
-		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-			if (((enabled_backends_mask >> i) & 1) == 1) {
-				if (last_backend_enabled)
-					force_no_swizzle = true;
-				last_backend_enabled = true;
-			} else
-				last_backend_enabled = false;
-		}
-	}
-
-	switch (num_tile_pipes) {
-	case 1:
-	case 3:
-	case 5:
-	case 7:
-		DRM_ERROR("odd number of pipes!\n");
-		break;
-	case 2:
-		swizzle_pipe[0] = 0;
-		swizzle_pipe[1] = 1;
-		break;
-	case 4:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 1;
-			swizzle_pipe[3] = 3;
-		}
-		break;
-	case 6:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 1;
-			swizzle_pipe[4] = 3;
-			swizzle_pipe[5] = 5;
-		}
-		break;
-	case 8:
-		if (force_no_swizzle) {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 1;
-			swizzle_pipe[2] = 2;
-			swizzle_pipe[3] = 3;
-			swizzle_pipe[4] = 4;
-			swizzle_pipe[5] = 5;
-			swizzle_pipe[6] = 6;
-			swizzle_pipe[7] = 7;
-		} else {
-			swizzle_pipe[0] = 0;
-			swizzle_pipe[1] = 2;
-			swizzle_pipe[2] = 4;
-			swizzle_pipe[3] = 6;
-			swizzle_pipe[4] = 1;
-			swizzle_pipe[5] = 3;
-			swizzle_pipe[6] = 5;
-			swizzle_pipe[7] = 7;
-		}
-		break;
-	}
-
-	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-		while (((1 << cur_backend) & enabled_backends_mask) == 0)
-			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
-		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-	}
-
-	return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
-					    u32 disable_mask_per_se,
-					    u32 max_disable_mask_per_se,
-					    u32 num_shader_engines)
-{
-	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-	if (num_shader_engines == 1)
-		return disable_mask_per_asic;
-	else if (num_shader_engines == 2)
-		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-	else
-		return 0xffffffff;
-}
-
 static void cayman_gpu_init(struct radeon_device *rdev)
 {
-	u32 cc_rb_backend_disable = 0;
-	u32 cc_gc_shader_pipe_config;
 	u32 gb_addr_config = 0;
 	u32 mc_shared_chmap, mc_arb_ramcfg;
-	u32 gb_backend_map;
 	u32 cgts_tcc_disable;
 	u32 sx_debug_1;
 	u32 smx_dc_ctl0;
-	u32 gc_user_shader_pipe_config;
-	u32 gc_user_rb_backend_disable;
-	u32 cgts_user_tcc_disable;
 	u32 cgts_sm_ctrl_reg;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
+	u32 disabled_rb_mask;
 	int i, j;
 
 	switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		rdev->config.cayman.sc_prim_fifo_size = 0x100;
 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_ARUBA:
 	default:
@@ -657,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		rdev->config.cayman.max_pipes_per_simd = 4;
 		rdev->config.cayman.max_tile_pipes = 2;
 		if ((rdev->pdev->device == 0x9900) ||
-		    (rdev->pdev->device == 0x9901)) {
+		    (rdev->pdev->device == 0x9901) ||
+		    (rdev->pdev->device == 0x9905) ||
+		    (rdev->pdev->device == 0x9906) ||
+		    (rdev->pdev->device == 0x9907) ||
+		    (rdev->pdev->device == 0x9908) ||
+		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x9910) ||
+		    (rdev->pdev->device == 0x9917)) {
 			rdev->config.cayman.max_simds_per_se = 6;
 			rdev->config.cayman.max_backends_per_se = 2;
 		} else if ((rdev->pdev->device == 0x9903) ||
-			   (rdev->pdev->device == 0x9904)) {
+			   (rdev->pdev->device == 0x9904) ||
+			   (rdev->pdev->device == 0x990A) ||
+			   (rdev->pdev->device == 0x9913) ||
+			   (rdev->pdev->device == 0x9918)) {
 			rdev->config.cayman.max_simds_per_se = 4;
 			rdev->config.cayman.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x9990) ||
-			   (rdev->pdev->device == 0x9991)) {
+		} else if ((rdev->pdev->device == 0x9919) ||
+			   (rdev->pdev->device == 0x9990) ||
+			   (rdev->pdev->device == 0x9991) ||
+			   (rdev->pdev->device == 0x9994) ||
+			   (rdev->pdev->device == 0x99A0)) {
 			rdev->config.cayman.max_simds_per_se = 3;
 			rdev->config.cayman.max_backends_per_se = 1;
 		} else {
@@ -687,6 +503,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		rdev->config.cayman.sc_prim_fifo_size = 0x40;
 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	}
 
@@ -706,39 +523,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
-	cgts_tcc_disable = 0xffff0000;
-	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
-		cgts_tcc_disable &= ~(1 << (16 + i));
-	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
-	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
-	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
-	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
-	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
-	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
-	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
-	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-	rdev->config.cayman.backend_disable_mask_per_asic =
-		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
-						 rdev->config.cayman.num_shader_engines);
-	rdev->config.cayman.backend_map =
-		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-						    rdev->config.cayman.num_backends_per_se *
-						    rdev->config.cayman.num_shader_engines,
-						    &rdev->config.cayman.backend_disable_mask_per_asic,
-						    rdev->config.cayman.num_shader_engines);
-	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
-	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
-	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
-	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
-		rdev->config.cayman.mem_max_burst_length_bytes = 512;
 	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
 	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
 	if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	rdev->config.cayman.num_gpus = 1;
 	rdev->config.cayman.multi_gpu_tile_size = 64;
 
-	//gb_addr_config = 0x02011003
-#if 0
-	gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
-	gb_addr_config = 0;
-	switch (rdev->config.cayman.num_tile_pipes) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_PIPES(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_PIPES(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_PIPES(2);
-		break;
-	case 8:
-		gb_addr_config |= NUM_PIPES(3);
-		break;
-	}
-
-	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
-	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
-	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
-	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-	switch (rdev->config.cayman.num_gpus) {
-	case 1:
-	default:
-		gb_addr_config |= NUM_GPUS(0);
-		break;
-	case 2:
-		gb_addr_config |= NUM_GPUS(1);
-		break;
-	case 4:
-		gb_addr_config |= NUM_GPUS(2);
-		break;
-	}
-	switch (rdev->config.cayman.multi_gpu_tile_size) {
-	case 16:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-		break;
-	case 32:
-	default:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-		break;
-	case 64:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-		break;
-	case 128:
-		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-		break;
-	}
-	switch (rdev->config.cayman.mem_row_size_in_kb) {
-	case 1:
-	default:
-		gb_addr_config |= ROW_SIZE(0);
-		break;
-	case 2:
-		gb_addr_config |= ROW_SIZE(1);
-		break;
-	case 4:
-		gb_addr_config |= ROW_SIZE(2);
-		break;
-	}
-#endif
-
 	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
 	rdev->config.cayman.num_tile_pipes = (1 << tmp);
 	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
 	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
-	//gb_backend_map = 0x76541032;
-#if 0
-	gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
-	gb_backend_map =
-		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-						    rdev->config.cayman.num_backends_per_se *
-						    rdev->config.cayman.num_shader_engines,
-						    &rdev->config.cayman.backend_disable_mask_per_asic,
-						    rdev->config.cayman.num_shader_engines);
-#endif
+
 	/* setup tiling info dword. gb_addr_config is not adequate since it does
 	 * not have bank info, so create a custom tiling dword.
 	 * bits 3:0 num_pipes
@@ -866,33 +573,49 @@ static void cayman_gpu_init(struct radeon_device *rdev)
866 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 573 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
867 if (rdev->flags & RADEON_IS_IGP) 574 if (rdev->flags & RADEON_IS_IGP)
868 rdev->config.cayman.tile_config |= 1 << 4; 575 rdev->config.cayman.tile_config |= 1 << 4;
869 else 576 else {
870 rdev->config.cayman.tile_config |= 577 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
871 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 578 rdev->config.cayman.tile_config |= 1 << 4;
579 else
580 rdev->config.cayman.tile_config |= 0 << 4;
581 }
872 rdev->config.cayman.tile_config |= 582 rdev->config.cayman.tile_config |=
873 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 583 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
874 rdev->config.cayman.tile_config |= 584 rdev->config.cayman.tile_config |=
875 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 585 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
876 586
877 rdev->config.cayman.backend_map = gb_backend_map; 587 tmp = 0;
878 WREG32(GB_BACKEND_MAP, gb_backend_map); 588 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
589 u32 rb_disable_bitmap;
590
591 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
592 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
593 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
594 tmp <<= 4;
595 tmp |= rb_disable_bitmap;
596 }
 597 /* enabled rbs are just the ones not disabled :) */
598 disabled_rb_mask = tmp;
599
600 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
601 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
602
879 WREG32(GB_ADDR_CONFIG, gb_addr_config); 603 WREG32(GB_ADDR_CONFIG, gb_addr_config);
880 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 604 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
881 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 605 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
882 606
883 /* primary versions */ 607 tmp = gb_addr_config & NUM_PIPES_MASK;
884 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 608 tmp = r6xx_remap_render_backend(rdev, tmp,
885 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 609 rdev->config.cayman.max_backends_per_se *
886 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 610 rdev->config.cayman.max_shader_engines,
611 CAYMAN_MAX_BACKENDS, disabled_rb_mask);
612 WREG32(GB_BACKEND_MAP, tmp);
887 613
614 cgts_tcc_disable = 0xffff0000;
615 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
616 cgts_tcc_disable &= ~(1 << (16 + i));
888 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); 617 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
889 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); 618 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
890
891 /* user versions */
892 WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
893 WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
894 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
895
896 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); 619 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
897 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 620 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
898 621
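
The per-SE loop above selects each shader engine via GRBM_GFX_INDEX/RLC_GFX_INDEX, reads its RB-disable field, and packs the result into one nibble of a combined mask. A minimal userspace sketch of just the packing step (names are illustrative, not the kernel's):

#include <stdint.h>

/* SE(num_se - 1) ends up in the highest nibble and SE0 in bits 3:0,
 * matching the shift-then-or order of the loop above */
static uint32_t pack_rb_disable(const uint32_t *per_se_bitmap, int num_se)
{
        uint32_t tmp = 0;
        int i;

        for (i = num_se - 1; i >= 0; i--) {
                tmp <<= 4;
                tmp |= per_se_bitmap[i] & 0xf;
        }
        return tmp;
}
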
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046ada56..a0b98066e207 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
41#define CAYMAN_MAX_TCC 16 41#define CAYMAN_MAX_TCC 16
42#define CAYMAN_MAX_TCC_MASK 0xFF 42#define CAYMAN_MAX_TCC_MASK 0xFF
43 43
44#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
46
44#define DMIF_ADDR_CONFIG 0xBD4 47#define DMIF_ADDR_CONFIG 0xBD4
45#define SRBM_GFX_CNTL 0x0E44 48#define SRBM_GFX_CNTL 0x0E44
46#define RINGID(x) (((x) & 0x3) << 0) 49#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
148#define CGTS_SYS_TCC_DISABLE 0x3F90 151#define CGTS_SYS_TCC_DISABLE 0x3F90
149#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 152#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
150 153
154#define RLC_GFX_INDEX 0x3FC4
155
151#define CONFIG_MEMSIZE 0x5428 156#define CONFIG_MEMSIZE 0x5428
152 157
153#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 158#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
212#define SOFT_RESET_VGT (1 << 14) 217#define SOFT_RESET_VGT (1 << 14)
213#define SOFT_RESET_IA (1 << 15) 218#define SOFT_RESET_IA (1 << 15)
214 219
220#define GRBM_GFX_INDEX 0x802C
221#define INSTANCE_INDEX(x) ((x) << 0)
222#define SE_INDEX(x) ((x) << 16)
223#define INSTANCE_BROADCAST_WRITES (1 << 30)
224#define SE_BROADCAST_WRITES (1 << 31)
225
215#define SCRATCH_REG0 0x8500 226#define SCRATCH_REG0 0x8500
216#define SCRATCH_REG1 0x8504 227#define SCRATCH_REG1 0x8504
217#define SCRATCH_REG2 0x8508 228#define SCRATCH_REG2 0x8508
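
For reference, the new GRBM_GFX_INDEX field macros compose by OR-ing; a usage sketch (not from this patch) that broadcasts to every instance of shader engine 1:

        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(1));
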
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f388a1d73b63..f30dc95f83b1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
1376 return r600_gpu_soft_reset(rdev); 1376 return r600_gpu_soft_reset(rdev);
1377} 1377}
1378 1378
1379static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 1379u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1380 u32 num_backends, 1380 u32 tiling_pipe_num,
1381 u32 backend_disable_mask) 1381 u32 max_rb_num,
1382{ 1382 u32 total_max_rb_num,
1383 u32 backend_map = 0; 1383 u32 disabled_rb_mask)
1384 u32 enabled_backends_mask; 1384{
1385 u32 enabled_backends_count; 1385 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1386 u32 cur_pipe; 1386 u32 pipe_rb_ratio, pipe_rb_remain;
1387 u32 swizzle_pipe[R6XX_MAX_PIPES]; 1387 u32 data = 0, mask = 1 << (max_rb_num - 1);
1388 u32 cur_backend; 1388 unsigned i, j;
1389 u32 i; 1389
1390 1390 /* mask out the RBs that don't exist on that asic */
1391 if (num_tile_pipes > R6XX_MAX_PIPES) 1391 disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
1392 num_tile_pipes = R6XX_MAX_PIPES; 1392
1393 if (num_tile_pipes < 1) 1393 rendering_pipe_num = 1 << tiling_pipe_num;
1394 num_tile_pipes = 1; 1394 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1395 if (num_backends > R6XX_MAX_BACKENDS) 1395 BUG_ON(rendering_pipe_num < req_rb_num);
1396 num_backends = R6XX_MAX_BACKENDS; 1396
1397 if (num_backends < 1) 1397 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1398 num_backends = 1; 1398 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1399 1399
1400 enabled_backends_mask = 0; 1400 if (rdev->family <= CHIP_RV740) {
1401 enabled_backends_count = 0; 1401 /* r6xx/r7xx */
1402 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { 1402 rb_num_width = 2;
1403 if (((backend_disable_mask >> i) & 1) == 0) { 1403 } else {
1404 enabled_backends_mask |= (1 << i); 1404 /* eg+ */
1405 ++enabled_backends_count; 1405 rb_num_width = 4;
1406 }
1407 if (enabled_backends_count == num_backends)
1408 break;
1409 }
1410
1411 if (enabled_backends_count == 0) {
1412 enabled_backends_mask = 1;
1413 enabled_backends_count = 1;
1414 }
1415
1416 if (enabled_backends_count != num_backends)
1417 num_backends = enabled_backends_count;
1418
1419 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1420 switch (num_tile_pipes) {
1421 case 1:
1422 swizzle_pipe[0] = 0;
1423 break;
1424 case 2:
1425 swizzle_pipe[0] = 0;
1426 swizzle_pipe[1] = 1;
1427 break;
1428 case 3:
1429 swizzle_pipe[0] = 0;
1430 swizzle_pipe[1] = 1;
1431 swizzle_pipe[2] = 2;
1432 break;
1433 case 4:
1434 swizzle_pipe[0] = 0;
1435 swizzle_pipe[1] = 1;
1436 swizzle_pipe[2] = 2;
1437 swizzle_pipe[3] = 3;
1438 break;
1439 case 5:
1440 swizzle_pipe[0] = 0;
1441 swizzle_pipe[1] = 1;
1442 swizzle_pipe[2] = 2;
1443 swizzle_pipe[3] = 3;
1444 swizzle_pipe[4] = 4;
1445 break;
1446 case 6:
1447 swizzle_pipe[0] = 0;
1448 swizzle_pipe[1] = 2;
1449 swizzle_pipe[2] = 4;
1450 swizzle_pipe[3] = 5;
1451 swizzle_pipe[4] = 1;
1452 swizzle_pipe[5] = 3;
1453 break;
1454 case 7:
1455 swizzle_pipe[0] = 0;
1456 swizzle_pipe[1] = 2;
1457 swizzle_pipe[2] = 4;
1458 swizzle_pipe[3] = 6;
1459 swizzle_pipe[4] = 1;
1460 swizzle_pipe[5] = 3;
1461 swizzle_pipe[6] = 5;
1462 break;
1463 case 8:
1464 swizzle_pipe[0] = 0;
1465 swizzle_pipe[1] = 2;
1466 swizzle_pipe[2] = 4;
1467 swizzle_pipe[3] = 6;
1468 swizzle_pipe[4] = 1;
1469 swizzle_pipe[5] = 3;
1470 swizzle_pipe[6] = 5;
1471 swizzle_pipe[7] = 7;
1472 break;
1473 } 1406 }
1474 1407
1475 cur_backend = 0; 1408 for (i = 0; i < max_rb_num; i++) {
1476 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { 1409 if (!(mask & disabled_rb_mask)) {
1477 while (((1 << cur_backend) & enabled_backends_mask) == 0) 1410 for (j = 0; j < pipe_rb_ratio; j++) {
1478 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 1411 data <<= rb_num_width;
1479 1412 data |= max_rb_num - i - 1;
1480 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); 1413 }
1481 1414 if (pipe_rb_remain) {
1482 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 1415 data <<= rb_num_width;
1416 data |= max_rb_num - i - 1;
1417 pipe_rb_remain--;
1418 }
1419 }
1420 mask >>= 1;
1483 } 1421 }
1484 1422
1485 return backend_map; 1423 return data;
1486} 1424}
1487 1425
1488int r600_count_pipe_bits(uint32_t val) 1426int r600_count_pipe_bits(uint32_t val)
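
To make the new remap concrete, here is a standalone reduction of the same arithmetic. This is a sketch only: popcount is open-coded in place of r600_count_pipe_bits(), and the eg+ 4-bit RB field width is assumed.

#include <stdio.h>
#include <stdint.h>

static unsigned popcount8(uint32_t v)
{
        unsigned n = 0;

        while (v) {
                n += v & 1;
                v >>= 1;
        }
        return n;
}

static uint32_t remap_rb(uint32_t pipes_log2, uint32_t max_rb,
                         uint32_t total_max_rb, uint32_t disabled)
{
        uint32_t data = 0, mask = 1u << (max_rb - 1);
        uint32_t slots = 1u << pipes_log2;
        uint32_t req, ratio, remain, i, j;

        disabled |= (0xffu << max_rb) & 0xff;   /* RBs past max_rb don't exist */
        req = total_max_rb - popcount8(disabled);
        ratio = slots / req;
        remain = slots - ratio * req;

        for (i = 0; i < max_rb; i++) {
                if (!(mask & disabled)) {
                        for (j = 0; j < ratio; j++) {
                                data <<= 4;     /* eg+ field width */
                                data |= max_rb - i - 1;
                        }
                        if (remain) {           /* spread the leftover slots */
                                data <<= 4;
                                data |= max_rb - i - 1;
                                remain--;
                        }
                }
                mask >>= 1;
        }
        return data;
}

int main(void)
{
        /* 4 rendering pipes, RB1 of 4 harvested (8 architectural):
         * prints 0x3320, i.e. the pipes take RBs 3, 3, 2, 0, with the
         * remainder slot repeating the first enabled RB */
        printf("0x%x\n", remap_rb(2, 4, 8, 0x2));
        return 0;
}
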
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
1500{ 1438{
1501 u32 tiling_config; 1439 u32 tiling_config;
1502 u32 ramcfg; 1440 u32 ramcfg;
1503 u32 backend_map;
1504 u32 cc_rb_backend_disable; 1441 u32 cc_rb_backend_disable;
1505 u32 cc_gc_shader_pipe_config; 1442 u32 cc_gc_shader_pipe_config;
1506 u32 tmp; 1443 u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
1511 u32 sq_thread_resource_mgmt = 0; 1448 u32 sq_thread_resource_mgmt = 0;
1512 u32 sq_stack_resource_mgmt_1 = 0; 1449 u32 sq_stack_resource_mgmt_1 = 0;
1513 u32 sq_stack_resource_mgmt_2 = 0; 1450 u32 sq_stack_resource_mgmt_2 = 0;
1451 u32 disabled_rb_mask;
1514 1452
1515 /* FIXME: implement */ 1453 rdev->config.r600.tiling_group_size = 256;
1516 switch (rdev->family) { 1454 switch (rdev->family) {
1517 case CHIP_R600: 1455 case CHIP_R600:
1518 rdev->config.r600.max_pipes = 4; 1456 rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1616 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1554 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1617 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1555 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1618 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 1556 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1619 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) 1557
1620 rdev->config.r600.tiling_group_size = 512;
1621 else
1622 rdev->config.r600.tiling_group_size = 256;
1623 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1558 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1624 if (tmp > 3) { 1559 if (tmp > 3) {
1625 tiling_config |= ROW_TILING(3); 1560 tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
1631 tiling_config |= BANK_SWAPS(1); 1566 tiling_config |= BANK_SWAPS(1);
1632 1567
1633 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1568 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1634 cc_rb_backend_disable |= 1569 tmp = R6XX_MAX_BACKENDS -
1635 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); 1570 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1636 1571 if (tmp < rdev->config.r600.max_backends) {
1637 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; 1572 rdev->config.r600.max_backends = tmp;
1638 cc_gc_shader_pipe_config |= 1573 }
1639 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); 1574
1640 cc_gc_shader_pipe_config |= 1575 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1641 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); 1576 tmp = R6XX_MAX_PIPES -
1642 1577 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1643 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, 1578 if (tmp < rdev->config.r600.max_pipes) {
1644 (R6XX_MAX_BACKENDS - 1579 rdev->config.r600.max_pipes = tmp;
1645 r600_count_pipe_bits((cc_rb_backend_disable & 1580 }
1646 R6XX_MAX_BACKENDS_MASK) >> 16)), 1581 tmp = R6XX_MAX_SIMDS -
1647 (cc_rb_backend_disable >> 16)); 1582 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1583 if (tmp < rdev->config.r600.max_simds) {
1584 rdev->config.r600.max_simds = tmp;
1585 }
1586
1587 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1588 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1589 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1590 R6XX_MAX_BACKENDS, disabled_rb_mask);
1591 tiling_config |= tmp << 16;
1592 rdev->config.r600.backend_map = tmp;
1593
1648 rdev->config.r600.tile_config = tiling_config; 1594 rdev->config.r600.tile_config = tiling_config;
1649 rdev->config.r600.backend_map = backend_map;
1650 tiling_config |= BACKEND_MAP(backend_map);
1651 WREG32(GB_TILING_CONFIG, tiling_config); 1595 WREG32(GB_TILING_CONFIG, tiling_config);
1652 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1596 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1653 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1597 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1654 1598
1655 /* Setup pipes */
1656 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1657 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1658 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1659
1660 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1599 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1661 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1600 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1662 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1601 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
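
This hunk (and the rv770/cayman counterparts in this patch) derives the usable unit counts from the harvest registers instead of programming disable masks. The recurring pattern, reduced to one hedged helper that assumes GCC's __builtin_popcount in place of r600_count_pipe_bits():

#include <stdint.h>

/* clamp a configured maximum to what the fuses left enabled;
 * 'disable_field' is the relevant bits already shifted into place */
static unsigned clamp_to_harvest(unsigned hw_max, unsigned cfg_max,
                                 uint32_t disable_field)
{
        unsigned enabled = hw_max - __builtin_popcount(disable_field);

        return enabled < cfg_max ? enabled : cfg_max;
}
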
@@ -2487,6 +2426,12 @@ int r600_startup(struct radeon_device *rdev)
2487 if (r) 2426 if (r)
2488 return r; 2427 return r;
2489 2428
2429 r = r600_audio_init(rdev);
2430 if (r) {
2431 DRM_ERROR("radeon: audio init failed\n");
2432 return r;
2433 }
2434
2490 return 0; 2435 return 0;
2491} 2436}
2492 2437
@@ -2523,12 +2468,6 @@ int r600_resume(struct radeon_device *rdev)
2523 return r; 2468 return r;
2524 } 2469 }
2525 2470
2526 r = r600_audio_init(rdev);
2527 if (r) {
2528 DRM_ERROR("radeon: audio resume failed\n");
2529 return r;
2530 }
2531
2532 return r; 2471 return r;
2533} 2472}
2534 2473
@@ -2638,9 +2577,6 @@ int r600_init(struct radeon_device *rdev)
2638 rdev->accel_working = false; 2577 rdev->accel_working = false;
2639 } 2578 }
2640 2579
2641 r = r600_audio_init(rdev);
2642 if (r)
2643 return r; /* TODO error handling */
2644 return 0; 2580 return 0;
2645} 2581}
2646 2582
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77f018f..7479a5c503e4 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
192 struct radeon_device *rdev = dev->dev_private; 192 struct radeon_device *rdev = dev->dev_private;
193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
195 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
195 int base_rate = 48000; 196 int base_rate = 48000;
196 197
197 switch (radeon_encoder->encoder_id) { 198 switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
217 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); 218 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
218 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); 219 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
219 220
220 /* Some magic trigger or src sel? */ 221 /* Select DTO source */
221 WREG32_P(0x5ac, 0x01, ~0x77); 222 WREG32(0x5ac, radeon_crtc->crtc_id);
222 } else { 223 } else {
223 switch (dig->dig_encoder) { 224 switch (dig->dig_encoder) {
224 case 0: 225 case 0:
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e00ac1..969c27529dfe 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -348,7 +348,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
348 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 348 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
349 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 349 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
350 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 350 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
351 HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
 352 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ 351 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
353 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 352 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
354 } 353 }
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 15bd3b216243..a0dbf1fe6a40 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
219#define BACKEND_MAP(x) ((x) << 16) 219#define BACKEND_MAP(x) ((x) << 16)
220 220
221#define GB_TILING_CONFIG 0x98F0 221#define GB_TILING_CONFIG 0x98F0
222#define PIPE_TILING__SHIFT 1
223#define PIPE_TILING__MASK 0x0000000e
222 224
223#define GC_USER_SHADER_PIPE_CONFIG 0x8954 225#define GC_USER_SHADER_PIPE_CONFIG 0x8954
224#define INACTIVE_QD_PIPES(x) ((x) << 8) 226#define INACTIVE_QD_PIPES(x) ((x) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 492654f8ee74..fefcca55c1eb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@ struct radeon_bo {
346 /* Constant after initialization */ 346 /* Constant after initialization */
347 struct radeon_device *rdev; 347 struct radeon_device *rdev;
348 struct drm_gem_object gem_base; 348 struct drm_gem_object gem_base;
349
350 struct ttm_bo_kmap_obj dma_buf_vmap;
351 int vmapping_count;
349}; 352};
350#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 353#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
351 354
@@ -1371,9 +1374,9 @@ struct cayman_asic {
1371 1374
1372struct si_asic { 1375struct si_asic {
1373 unsigned max_shader_engines; 1376 unsigned max_shader_engines;
1374 unsigned max_pipes_per_simd;
1375 unsigned max_tile_pipes; 1377 unsigned max_tile_pipes;
1376 unsigned max_simds_per_se; 1378 unsigned max_cu_per_sh;
1379 unsigned max_sh_per_se;
1377 unsigned max_backends_per_se; 1380 unsigned max_backends_per_se;
1378 unsigned max_texture_channel_caches; 1381 unsigned max_texture_channel_caches;
1379 unsigned max_gprs; 1382 unsigned max_gprs;
@@ -1384,7 +1387,6 @@ struct si_asic {
1384 unsigned sc_hiz_tile_fifo_size; 1387 unsigned sc_hiz_tile_fifo_size;
1385 unsigned sc_earlyz_tile_fifo_size; 1388 unsigned sc_earlyz_tile_fifo_size;
1386 1389
1387 unsigned num_shader_engines;
1388 unsigned num_tile_pipes; 1390 unsigned num_tile_pipes;
1389 unsigned num_backends_per_se; 1391 unsigned num_backends_per_se;
1390 unsigned backend_disable_mask_per_asic; 1392 unsigned backend_disable_mask_per_asic;
@@ -1845,6 +1847,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
1845extern void r600_hdmi_enable(struct drm_encoder *encoder); 1847extern void r600_hdmi_enable(struct drm_encoder *encoder);
1846extern void r600_hdmi_disable(struct drm_encoder *encoder); 1848extern void r600_hdmi_disable(struct drm_encoder *encoder);
1847extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1849extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1850extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1851 u32 tiling_pipe_num,
1852 u32 max_rb_num,
1853 u32 total_max_rb_num,
1854 u32 enabled_rb_mask);
1848 1855
1849/* 1856/*
1850 * evergreen functions used by radeon_encoder.c 1857 * evergreen functions used by radeon_encoder.c
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0137689ed461..142f89462aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
147 sync_to_ring, p->ring); 147 sync_to_ring, p->ring);
148} 148}
149 149
150/* XXX: note that this is called from the legacy UMS CS ioctl as well */
150int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 151int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
151{ 152{
152 struct drm_radeon_cs *cs = data; 153 struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
245 } 246 }
246 } 247 }
247 248
248 if ((p->cs_flags & RADEON_CS_USE_VM) && 249 /* these are KMS only */
249 !p->rdev->vm_manager.enabled) { 250 if (p->rdev) {
250 DRM_ERROR("VM not active on asic!\n"); 251 if ((p->cs_flags & RADEON_CS_USE_VM) &&
251 return -EINVAL; 252 !p->rdev->vm_manager.enabled) {
252 } 253 DRM_ERROR("VM not active on asic!\n");
253 254 return -EINVAL;
254 /* we only support VM on SI+ */ 255 }
255 if ((p->rdev->family >= CHIP_TAHITI) &&
256 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
257 DRM_ERROR("VM required on SI+!\n");
258 return -EINVAL;
259 }
260 256
261 if (radeon_cs_get_ring(p, ring, priority)) 257 /* we only support VM on SI+ */
262 return -EINVAL; 258 if ((p->rdev->family >= CHIP_TAHITI) &&
259 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
260 DRM_ERROR("VM required on SI+!\n");
261 return -EINVAL;
262 }
263 263
264 if (radeon_cs_get_ring(p, ring, priority))
265 return -EINVAL;
266 }
264 267
265 /* deal with non-vm */ 268 /* deal with non-vm */
266 if ((p->chunk_ib_idx != -1) && 269 if ((p->chunk_ib_idx != -1) &&
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e6c2ac..59d44937dd9f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
476 476
477 mutex_lock(&vm->mutex); 477 mutex_lock(&vm->mutex);
478 if (last_pfn > vm->last_pfn) { 478 if (last_pfn > vm->last_pfn) {
 479 /* grow va space 32M by 32M */ 479 /* release mutex and lock in the right order */
480 unsigned align = ((32 << 20) >> 12) - 1; 480 mutex_unlock(&vm->mutex);
481 radeon_mutex_lock(&rdev->cs_mutex); 481 radeon_mutex_lock(&rdev->cs_mutex);
482 radeon_vm_unbind_locked(rdev, vm); 482 mutex_lock(&vm->mutex);
483 /* and check again */
484 if (last_pfn > vm->last_pfn) {
485 /* grow va space 32M by 32M */
486 unsigned align = ((32 << 20) >> 12) - 1;
487 radeon_vm_unbind_locked(rdev, vm);
488 vm->last_pfn = (last_pfn + align) & ~align;
489 }
483 radeon_mutex_unlock(&rdev->cs_mutex); 490 radeon_mutex_unlock(&rdev->cs_mutex);
484 vm->last_pfn = (last_pfn + align) & ~align;
485 } 491 }
486 head = &vm->va; 492 head = &vm->va;
487 last_offset = 0; 493 last_offset = 0;
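
The reordering above ensures cs_mutex is always taken before vm->mutex. Since the caller already holds vm->mutex when it discovers it must grow the VA space, it drops it, takes both locks in the canonical order, and re-checks the condition. A hedged pthread sketch of that drop-and-retake pattern (illustrative, not kernel code):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;  /* cs_mutex  */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;  /* vm->mutex */
static unsigned long last_pfn;

void grow_va_if_needed(unsigned long new_pfn)
{
        pthread_mutex_lock(&lock_b);
        if (new_pfn > last_pfn) {
                /* must not take A while holding B: drop B, retake in order */
                pthread_mutex_unlock(&lock_b);
                pthread_mutex_lock(&lock_a);
                pthread_mutex_lock(&lock_b);
                /* state may have changed while B was dropped */
                if (new_pfn > last_pfn)
                        last_pfn = new_pfn;
                pthread_mutex_unlock(&lock_a);
        }
        /* ... continue with B held ... */
        pthread_mutex_unlock(&lock_b);
}
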
@@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
595 if (bo_va == NULL) 601 if (bo_va == NULL)
596 return 0; 602 return 0;
597 603
598 mutex_lock(&vm->mutex);
599 radeon_mutex_lock(&rdev->cs_mutex); 604 radeon_mutex_lock(&rdev->cs_mutex);
605 mutex_lock(&vm->mutex);
600 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 606 radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
601 radeon_mutex_unlock(&rdev->cs_mutex); 607 radeon_mutex_unlock(&rdev->cs_mutex);
602 list_del(&bo_va->vm_list); 608 list_del(&bo_va->vm_list);
@@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
641 struct radeon_bo_va *bo_va, *tmp; 647 struct radeon_bo_va *bo_va, *tmp;
642 int r; 648 int r;
643 649
644 mutex_lock(&vm->mutex);
645
646 radeon_mutex_lock(&rdev->cs_mutex); 650 radeon_mutex_lock(&rdev->cs_mutex);
651 mutex_lock(&vm->mutex);
647 radeon_vm_unbind_locked(rdev, vm); 652 radeon_vm_unbind_locked(rdev, vm);
648 radeon_mutex_unlock(&rdev->cs_mutex); 653 radeon_mutex_unlock(&rdev->cs_mutex);
649 654
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5820d1..5c58d7d90cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
273 break; 273 break;
274 case RADEON_INFO_MAX_PIPES: 274 case RADEON_INFO_MAX_PIPES:
275 if (rdev->family >= CHIP_TAHITI) 275 if (rdev->family >= CHIP_TAHITI)
276 value = rdev->config.si.max_pipes_per_simd; 276 value = rdev->config.si.max_cu_per_sh;
277 else if (rdev->family >= CHIP_CAYMAN) 277 else if (rdev->family >= CHIP_CAYMAN)
278 value = rdev->config.cayman.max_pipes_per_simd; 278 value = rdev->config.cayman.max_pipes_per_simd;
279 else if (rdev->family >= CHIP_CEDAR) 279 else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b8f835d8ecb4..8ddab4c76710 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
85 85
86} 86}
87 87
88static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
89{
90 return -EINVAL;
91}
92
93static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
94{
95 struct radeon_bo *bo = dma_buf->priv;
96 struct drm_device *dev = bo->rdev->ddev;
97 int ret;
98
99 mutex_lock(&dev->struct_mutex);
100 if (bo->vmapping_count) {
101 bo->vmapping_count++;
102 goto out_unlock;
103 }
104
105 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
106 &bo->dma_buf_vmap);
107 if (ret) {
108 mutex_unlock(&dev->struct_mutex);
109 return ERR_PTR(ret);
110 }
111 bo->vmapping_count = 1;
112out_unlock:
113 mutex_unlock(&dev->struct_mutex);
114 return bo->dma_buf_vmap.virtual;
115}
116
117static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
118{
119 struct radeon_bo *bo = dma_buf->priv;
120 struct drm_device *dev = bo->rdev->ddev;
121
122 mutex_lock(&dev->struct_mutex);
123 bo->vmapping_count--;
124 if (bo->vmapping_count == 0) {
125 ttm_bo_kunmap(&bo->dma_buf_vmap);
126 }
127 mutex_unlock(&dev->struct_mutex);
128}
88const static struct dma_buf_ops radeon_dmabuf_ops = { 129const static struct dma_buf_ops radeon_dmabuf_ops = {
89 .map_dma_buf = radeon_gem_map_dma_buf, 130 .map_dma_buf = radeon_gem_map_dma_buf,
90 .unmap_dma_buf = radeon_gem_unmap_dma_buf, 131 .unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops = {
93 .kmap_atomic = radeon_gem_kmap_atomic, 134 .kmap_atomic = radeon_gem_kmap_atomic,
94 .kunmap = radeon_gem_kunmap, 135 .kunmap = radeon_gem_kunmap,
95 .kunmap_atomic = radeon_gem_kunmap_atomic, 136 .kunmap_atomic = radeon_gem_kunmap_atomic,
137 .mmap = radeon_gem_prime_mmap,
138 .vmap = radeon_gem_prime_vmap,
139 .vunmap = radeon_gem_prime_vunmap,
96}; 140};
97 141
98static int radeon_prime_create(struct drm_device *dev, 142static int radeon_prime_create(struct drm_device *dev,
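
The vmap path added above caches a single kernel mapping per buffer and refcounts it under struct_mutex, so repeated dma-buf vmap calls share one ttm_bo_kmap(). The shape of that pattern, as a hedged standalone sketch:

#include <stddef.h>

struct vmap_cache {
        void *vaddr;    /* valid while count > 0 */
        int count;      /* protected by an outer lock, like struct_mutex above */
};

static void *vmap_get(struct vmap_cache *c, void *(*map)(void))
{
        if (c->count == 0) {
                c->vaddr = map();
                if (!c->vaddr)
                        return NULL;    /* leave count at 0 on failure */
        }
        c->count++;
        return c->vaddr;
}

static void vmap_put(struct vmap_cache *c, void (*unmap)(void *))
{
        if (--c->count == 0) {
                unmap(c->vaddr);
                c->vaddr = NULL;
        }
}
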
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef12c42..e95c5e61d4e2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
908 return r; 908 return r;
909 } 909 }
910 910
911 r = r600_audio_init(rdev);
912 if (r) {
913 dev_err(rdev->dev, "failed initializing audio\n");
914 return r;
915 }
916
917 r = radeon_ib_pool_start(rdev); 911 r = radeon_ib_pool_start(rdev);
918 if (r) 912 if (r)
919 return r; 913 return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
922 if (r) 916 if (r)
923 return r; 917 return r;
924 918
919 r = r600_audio_init(rdev);
920 if (r) {
921 dev_err(rdev->dev, "failed initializing audio\n");
922 return r;
923 }
924
925 return 0; 925 return 0;
926} 926}
927 927
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277ddecfe9f..159b6a43fda0 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
637 return r; 637 return r;
638 } 638 }
639 639
640 r = r600_audio_init(rdev);
641 if (r) {
642 dev_err(rdev->dev, "failed initializing audio\n");
643 return r;
644 }
645
646 r = radeon_ib_pool_start(rdev); 640 r = radeon_ib_pool_start(rdev);
647 if (r) 641 if (r)
648 return r; 642 return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
651 if (r) 645 if (r)
652 return r; 646 return r;
653 647
648 r = r600_audio_init(rdev);
649 if (r) {
650 dev_err(rdev->dev, "failed initializing audio\n");
651 return r;
652 }
653
654 return 0; 654 return 0;
655} 655}
656 656
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c2f473bc13b8..4ad0281fdc37 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
151 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 151 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
152 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 152 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
153 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 153 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
154 if (rdev->family == CHIP_RV740)
155 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
154 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 156 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
155 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 157 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
156 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 158 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
363/* 365/*
364 * Core functions 366 * Core functions
365 */ 367 */
366static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
367 u32 num_tile_pipes,
368 u32 num_backends,
369 u32 backend_disable_mask)
370{
371 u32 backend_map = 0;
372 u32 enabled_backends_mask;
373 u32 enabled_backends_count;
374 u32 cur_pipe;
375 u32 swizzle_pipe[R7XX_MAX_PIPES];
376 u32 cur_backend;
377 u32 i;
378 bool force_no_swizzle;
379
380 if (num_tile_pipes > R7XX_MAX_PIPES)
381 num_tile_pipes = R7XX_MAX_PIPES;
382 if (num_tile_pipes < 1)
383 num_tile_pipes = 1;
384 if (num_backends > R7XX_MAX_BACKENDS)
385 num_backends = R7XX_MAX_BACKENDS;
386 if (num_backends < 1)
387 num_backends = 1;
388
389 enabled_backends_mask = 0;
390 enabled_backends_count = 0;
391 for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
392 if (((backend_disable_mask >> i) & 1) == 0) {
393 enabled_backends_mask |= (1 << i);
394 ++enabled_backends_count;
395 }
396 if (enabled_backends_count == num_backends)
397 break;
398 }
399
400 if (enabled_backends_count == 0) {
401 enabled_backends_mask = 1;
402 enabled_backends_count = 1;
403 }
404
405 if (enabled_backends_count != num_backends)
406 num_backends = enabled_backends_count;
407
408 switch (rdev->family) {
409 case CHIP_RV770:
410 case CHIP_RV730:
411 force_no_swizzle = false;
412 break;
413 case CHIP_RV710:
414 case CHIP_RV740:
415 default:
416 force_no_swizzle = true;
417 break;
418 }
419
420 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
421 switch (num_tile_pipes) {
422 case 1:
423 swizzle_pipe[0] = 0;
424 break;
425 case 2:
426 swizzle_pipe[0] = 0;
427 swizzle_pipe[1] = 1;
428 break;
429 case 3:
430 if (force_no_swizzle) {
431 swizzle_pipe[0] = 0;
432 swizzle_pipe[1] = 1;
433 swizzle_pipe[2] = 2;
434 } else {
435 swizzle_pipe[0] = 0;
436 swizzle_pipe[1] = 2;
437 swizzle_pipe[2] = 1;
438 }
439 break;
440 case 4:
441 if (force_no_swizzle) {
442 swizzle_pipe[0] = 0;
443 swizzle_pipe[1] = 1;
444 swizzle_pipe[2] = 2;
445 swizzle_pipe[3] = 3;
446 } else {
447 swizzle_pipe[0] = 0;
448 swizzle_pipe[1] = 2;
449 swizzle_pipe[2] = 3;
450 swizzle_pipe[3] = 1;
451 }
452 break;
453 case 5:
454 if (force_no_swizzle) {
455 swizzle_pipe[0] = 0;
456 swizzle_pipe[1] = 1;
457 swizzle_pipe[2] = 2;
458 swizzle_pipe[3] = 3;
459 swizzle_pipe[4] = 4;
460 } else {
461 swizzle_pipe[0] = 0;
462 swizzle_pipe[1] = 2;
463 swizzle_pipe[2] = 4;
464 swizzle_pipe[3] = 1;
465 swizzle_pipe[4] = 3;
466 }
467 break;
468 case 6:
469 if (force_no_swizzle) {
470 swizzle_pipe[0] = 0;
471 swizzle_pipe[1] = 1;
472 swizzle_pipe[2] = 2;
473 swizzle_pipe[3] = 3;
474 swizzle_pipe[4] = 4;
475 swizzle_pipe[5] = 5;
476 } else {
477 swizzle_pipe[0] = 0;
478 swizzle_pipe[1] = 2;
479 swizzle_pipe[2] = 4;
480 swizzle_pipe[3] = 5;
481 swizzle_pipe[4] = 3;
482 swizzle_pipe[5] = 1;
483 }
484 break;
485 case 7:
486 if (force_no_swizzle) {
487 swizzle_pipe[0] = 0;
488 swizzle_pipe[1] = 1;
489 swizzle_pipe[2] = 2;
490 swizzle_pipe[3] = 3;
491 swizzle_pipe[4] = 4;
492 swizzle_pipe[5] = 5;
493 swizzle_pipe[6] = 6;
494 } else {
495 swizzle_pipe[0] = 0;
496 swizzle_pipe[1] = 2;
497 swizzle_pipe[2] = 4;
498 swizzle_pipe[3] = 6;
499 swizzle_pipe[4] = 3;
500 swizzle_pipe[5] = 1;
501 swizzle_pipe[6] = 5;
502 }
503 break;
504 case 8:
505 if (force_no_swizzle) {
506 swizzle_pipe[0] = 0;
507 swizzle_pipe[1] = 1;
508 swizzle_pipe[2] = 2;
509 swizzle_pipe[3] = 3;
510 swizzle_pipe[4] = 4;
511 swizzle_pipe[5] = 5;
512 swizzle_pipe[6] = 6;
513 swizzle_pipe[7] = 7;
514 } else {
515 swizzle_pipe[0] = 0;
516 swizzle_pipe[1] = 2;
517 swizzle_pipe[2] = 4;
518 swizzle_pipe[3] = 6;
519 swizzle_pipe[4] = 3;
520 swizzle_pipe[5] = 1;
521 swizzle_pipe[6] = 7;
522 swizzle_pipe[7] = 5;
523 }
524 break;
525 }
526
527 cur_backend = 0;
528 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
529 while (((1 << cur_backend) & enabled_backends_mask) == 0)
530 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
531
532 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
533
534 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
535 }
536
537 return backend_map;
538}
539
540static void rv770_gpu_init(struct radeon_device *rdev) 368static void rv770_gpu_init(struct radeon_device *rdev)
541{ 369{
542 int i, j, num_qd_pipes; 370 int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
552 u32 sq_thread_resource_mgmt; 380 u32 sq_thread_resource_mgmt;
553 u32 hdp_host_path_cntl; 381 u32 hdp_host_path_cntl;
554 u32 sq_dyn_gpr_size_simd_ab_0; 382 u32 sq_dyn_gpr_size_simd_ab_0;
555 u32 backend_map;
556 u32 gb_tiling_config = 0; 383 u32 gb_tiling_config = 0;
557 u32 cc_rb_backend_disable = 0; 384 u32 cc_rb_backend_disable = 0;
558 u32 cc_gc_shader_pipe_config = 0; 385 u32 cc_gc_shader_pipe_config = 0;
559 u32 mc_arb_ramcfg; 386 u32 mc_arb_ramcfg;
560 u32 db_debug4; 387 u32 db_debug4, tmp;
388 u32 inactive_pipes, shader_pipe_config;
389 u32 disabled_rb_mask;
390 unsigned active_number;
561 391
562 /* setup chip specs */ 392 /* setup chip specs */
393 rdev->config.rv770.tiling_group_size = 256;
563 switch (rdev->family) { 394 switch (rdev->family) {
564 case CHIP_RV770: 395 case CHIP_RV770:
565 rdev->config.rv770.max_pipes = 4; 396 rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
670 /* setup tiling, simd, pipe config */ 501 /* setup tiling, simd, pipe config */
671 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 502 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
672 503
504 shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
505 inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
506 for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
507 if (!(inactive_pipes & tmp)) {
508 active_number++;
509 }
510 tmp <<= 1;
511 }
512 if (active_number == 1) {
513 WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
514 } else {
515 WREG32(SPI_CONFIG_CNTL, 0);
516 }
517
518 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
519 tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
520 if (tmp < rdev->config.rv770.max_backends) {
521 rdev->config.rv770.max_backends = tmp;
522 }
523
524 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
525 tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
526 if (tmp < rdev->config.rv770.max_pipes) {
527 rdev->config.rv770.max_pipes = tmp;
528 }
529 tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
530 if (tmp < rdev->config.rv770.max_simds) {
531 rdev->config.rv770.max_simds = tmp;
532 }
533
673 switch (rdev->config.rv770.max_tile_pipes) { 534 switch (rdev->config.rv770.max_tile_pipes) {
674 case 1: 535 case 1:
675 default: 536 default:
676 gb_tiling_config |= PIPE_TILING(0); 537 gb_tiling_config = PIPE_TILING(0);
677 break; 538 break;
678 case 2: 539 case 2:
679 gb_tiling_config |= PIPE_TILING(1); 540 gb_tiling_config = PIPE_TILING(1);
680 break; 541 break;
681 case 4: 542 case 4:
682 gb_tiling_config |= PIPE_TILING(2); 543 gb_tiling_config = PIPE_TILING(2);
683 break; 544 break;
684 case 8: 545 case 8:
685 gb_tiling_config |= PIPE_TILING(3); 546 gb_tiling_config = PIPE_TILING(3);
686 break; 547 break;
687 } 548 }
688 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; 549 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
689 550
551 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
552 tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
553 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
554 R7XX_MAX_BACKENDS, disabled_rb_mask);
555 gb_tiling_config |= tmp << 16;
556 rdev->config.rv770.backend_map = tmp;
557
690 if (rdev->family == CHIP_RV770) 558 if (rdev->family == CHIP_RV770)
691 gb_tiling_config |= BANK_TILING(1); 559 gb_tiling_config |= BANK_TILING(1);
692 else 560 else {
693 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 561 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
562 gb_tiling_config |= BANK_TILING(1);
563 else
564 gb_tiling_config |= BANK_TILING(0);
565 }
694 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); 566 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
695 gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 567 gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
696 if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
697 rdev->config.rv770.tiling_group_size = 512;
698 else
699 rdev->config.rv770.tiling_group_size = 256;
700 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { 568 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
701 gb_tiling_config |= ROW_TILING(3); 569 gb_tiling_config |= ROW_TILING(3);
702 gb_tiling_config |= SAMPLE_SPLIT(3); 570 gb_tiling_config |= SAMPLE_SPLIT(3);
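
The active-pipe loop added in this hunk is a hand-rolled population count over the INACTIVE_QD_PIPES field; an equivalent one-liner, assuming an 8-pipe field and GCC builtins:

static unsigned count_active_pipes(unsigned inactive_pipes)
{
        /* a pipe is active when its INACTIVE bit is clear */
        return 8 - __builtin_popcount(inactive_pipes & 0xff);
}
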
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
708 } 576 }
709 577
710 gb_tiling_config |= BANK_SWAPS(1); 578 gb_tiling_config |= BANK_SWAPS(1);
711
712 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
713 cc_rb_backend_disable |=
714 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
715
716 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
717 cc_gc_shader_pipe_config |=
718 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
719 cc_gc_shader_pipe_config |=
720 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
721
722 if (rdev->family == CHIP_RV740)
723 backend_map = 0x28;
724 else
725 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
726 rdev->config.rv770.max_tile_pipes,
727 (R7XX_MAX_BACKENDS -
728 r600_count_pipe_bits((cc_rb_backend_disable &
729 R7XX_MAX_BACKENDS_MASK) >> 16)),
730 (cc_rb_backend_disable >> 16));
731
732 rdev->config.rv770.tile_config = gb_tiling_config; 579 rdev->config.rv770.tile_config = gb_tiling_config;
733 rdev->config.rv770.backend_map = backend_map;
734 gb_tiling_config |= BACKEND_MAP(backend_map);
735 580
736 WREG32(GB_TILING_CONFIG, gb_tiling_config); 581 WREG32(GB_TILING_CONFIG, gb_tiling_config);
737 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 582 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
738 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 583 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
739 584
740 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
741 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
742 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
743 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
744
745 WREG32(CGTS_SYS_TCC_DISABLE, 0); 585 WREG32(CGTS_SYS_TCC_DISABLE, 0);
746 WREG32(CGTS_TCC_DISABLE, 0); 586 WREG32(CGTS_TCC_DISABLE, 0);
747 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); 587 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
748 WREG32(CGTS_USER_TCC_DISABLE, 0); 588 WREG32(CGTS_USER_TCC_DISABLE, 0);
749 589
750 num_qd_pipes = 590
751 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 591 num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
752 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); 592 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
753 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); 593 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
754 594
@@ -809,8 +649,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
809 649
810 WREG32(VGT_NUM_INSTANCES, 1); 650 WREG32(VGT_NUM_INSTANCES, 1);
811 651
812 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
813
814 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); 652 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
815 653
816 WREG32(CP_PERFMON_CNTL, 0); 654 WREG32(CP_PERFMON_CNTL, 0);
@@ -1118,6 +956,12 @@ static int rv770_startup(struct radeon_device *rdev)
1118 if (r) 956 if (r)
1119 return r; 957 return r;
1120 958
959 r = r600_audio_init(rdev);
960 if (r) {
961 DRM_ERROR("radeon: audio init failed\n");
962 return r;
963 }
964
1121 return 0; 965 return 0;
1122} 966}
1123 967
@@ -1140,12 +984,6 @@ int rv770_resume(struct radeon_device *rdev)
1140 return r; 984 return r;
1141 } 985 }
1142 986
1143 r = r600_audio_init(rdev);
1144 if (r) {
1145 dev_err(rdev->dev, "radeon: audio init failed\n");
1146 return r;
1147 }
1148
1149 return r; 987 return r;
1150 988
1151} 989}
@@ -1254,12 +1092,6 @@ int rv770_init(struct radeon_device *rdev)
1254 rdev->accel_working = false; 1092 rdev->accel_working = false;
1255 } 1093 }
1256 1094
1257 r = r600_audio_init(rdev);
1258 if (r) {
1259 dev_err(rdev->dev, "radeon: audio init failed\n");
1260 return r;
1261 }
1262
1263 return 0; 1095 return 0;
1264} 1096}
1265 1097
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9c549f702f2f..fdc089896011 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
106#define BACKEND_MAP(x) ((x) << 16) 106#define BACKEND_MAP(x) ((x) << 16)
107 107
108#define GB_TILING_CONFIG 0x98F0 108#define GB_TILING_CONFIG 0x98F0
109#define PIPE_TILING__SHIFT 1
110#define PIPE_TILING__MASK 0x0000000e
109 111
110#define GC_USER_SHADER_PIPE_CONFIG 0x8954 112#define GC_USER_SHADER_PIPE_CONFIG 0x8954
111#define INACTIVE_QD_PIPES(x) ((x) << 8) 113#define INACTIVE_QD_PIPES(x) ((x) << 8)
112#define INACTIVE_QD_PIPES_MASK 0x0000FF00 114#define INACTIVE_QD_PIPES_MASK 0x0000FF00
115#define INACTIVE_QD_PIPES_SHIFT 8
113#define INACTIVE_SIMDS(x) ((x) << 16) 116#define INACTIVE_SIMDS(x) ((x) << 16)
114#define INACTIVE_SIMDS_MASK 0x00FF0000 117#define INACTIVE_SIMDS_MASK 0x00FF0000
115 118
@@ -174,6 +177,7 @@
174#define MC_VM_MD_L1_TLB0_CNTL 0x2654 177#define MC_VM_MD_L1_TLB0_CNTL 0x2654
175#define MC_VM_MD_L1_TLB1_CNTL 0x2658 178#define MC_VM_MD_L1_TLB1_CNTL 0x2658
176#define MC_VM_MD_L1_TLB2_CNTL 0x265C 179#define MC_VM_MD_L1_TLB2_CNTL 0x265C
180#define MC_VM_MD_L1_TLB3_CNTL 0x2698
177#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C 181#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
178#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 182#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
179#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 183#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e56ca9..c7b61f16ecfd 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
867/* 867/*
868 * Core functions 868 * Core functions
869 */ 869 */
870static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
871 u32 num_tile_pipes,
872 u32 num_backends_per_asic,
873 u32 *backend_disable_mask_per_asic,
874 u32 num_shader_engines)
875{
876 u32 backend_map = 0;
877 u32 enabled_backends_mask = 0;
878 u32 enabled_backends_count = 0;
879 u32 num_backends_per_se;
880 u32 cur_pipe;
881 u32 swizzle_pipe[SI_MAX_PIPES];
882 u32 cur_backend = 0;
883 u32 i;
884 bool force_no_swizzle;
885
886 /* force legal values */
887 if (num_tile_pipes < 1)
888 num_tile_pipes = 1;
889 if (num_tile_pipes > rdev->config.si.max_tile_pipes)
890 num_tile_pipes = rdev->config.si.max_tile_pipes;
891 if (num_shader_engines < 1)
892 num_shader_engines = 1;
893 if (num_shader_engines > rdev->config.si.max_shader_engines)
894 num_shader_engines = rdev->config.si.max_shader_engines;
895 if (num_backends_per_asic < num_shader_engines)
896 num_backends_per_asic = num_shader_engines;
897 if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
898 num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
899
900 /* make sure we have the same number of backends per se */
901 num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
902 /* set up the number of backends per se */
903 num_backends_per_se = num_backends_per_asic / num_shader_engines;
904 if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
905 num_backends_per_se = rdev->config.si.max_backends_per_se;
906 num_backends_per_asic = num_backends_per_se * num_shader_engines;
907 }
908
909 /* create enable mask and count for enabled backends */
910 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
911 if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
912 enabled_backends_mask |= (1 << i);
913 ++enabled_backends_count;
914 }
915 if (enabled_backends_count == num_backends_per_asic)
916 break;
917 }
918
919 /* force the backends mask to match the current number of backends */
920 if (enabled_backends_count != num_backends_per_asic) {
921 u32 this_backend_enabled;
922 u32 shader_engine;
923 u32 backend_per_se;
924
925 enabled_backends_mask = 0;
926 enabled_backends_count = 0;
927 *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
928 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
929 /* calc the current se */
930 shader_engine = i / rdev->config.si.max_backends_per_se;
931 /* calc the backend per se */
932 backend_per_se = i % rdev->config.si.max_backends_per_se;
933 /* default to not enabled */
934 this_backend_enabled = 0;
935 if ((shader_engine < num_shader_engines) &&
936 (backend_per_se < num_backends_per_se))
937 this_backend_enabled = 1;
938 if (this_backend_enabled) {
939 enabled_backends_mask |= (1 << i);
940 *backend_disable_mask_per_asic &= ~(1 << i);
941 ++enabled_backends_count;
942 }
943 }
944 }
945
946
947 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
948 switch (rdev->family) {
949 case CHIP_TAHITI:
950 case CHIP_PITCAIRN:
951 case CHIP_VERDE:
952 force_no_swizzle = true;
953 break;
954 default:
955 force_no_swizzle = false;
956 break;
957 }
958 if (force_no_swizzle) {
959 bool last_backend_enabled = false;
960
961 force_no_swizzle = false;
962 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
963 if (((enabled_backends_mask >> i) & 1) == 1) {
964 if (last_backend_enabled)
965 force_no_swizzle = true;
966 last_backend_enabled = true;
967 } else
968 last_backend_enabled = false;
969 }
970 }
971
972 switch (num_tile_pipes) {
973 case 1:
974 case 3:
975 case 5:
976 case 7:
977 DRM_ERROR("odd number of pipes!\n");
978 break;
979 case 2:
980 swizzle_pipe[0] = 0;
981 swizzle_pipe[1] = 1;
982 break;
983 case 4:
984 if (force_no_swizzle) {
985 swizzle_pipe[0] = 0;
986 swizzle_pipe[1] = 1;
987 swizzle_pipe[2] = 2;
988 swizzle_pipe[3] = 3;
989 } else {
990 swizzle_pipe[0] = 0;
991 swizzle_pipe[1] = 2;
992 swizzle_pipe[2] = 1;
993 swizzle_pipe[3] = 3;
994 }
995 break;
996 case 6:
997 if (force_no_swizzle) {
998 swizzle_pipe[0] = 0;
999 swizzle_pipe[1] = 1;
1000 swizzle_pipe[2] = 2;
1001 swizzle_pipe[3] = 3;
1002 swizzle_pipe[4] = 4;
1003 swizzle_pipe[5] = 5;
1004 } else {
1005 swizzle_pipe[0] = 0;
1006 swizzle_pipe[1] = 2;
1007 swizzle_pipe[2] = 4;
1008 swizzle_pipe[3] = 1;
1009 swizzle_pipe[4] = 3;
1010 swizzle_pipe[5] = 5;
1011 }
1012 break;
1013 case 8:
1014 if (force_no_swizzle) {
1015 swizzle_pipe[0] = 0;
1016 swizzle_pipe[1] = 1;
1017 swizzle_pipe[2] = 2;
1018 swizzle_pipe[3] = 3;
1019 swizzle_pipe[4] = 4;
1020 swizzle_pipe[5] = 5;
1021 swizzle_pipe[6] = 6;
1022 swizzle_pipe[7] = 7;
1023 } else {
1024 swizzle_pipe[0] = 0;
1025 swizzle_pipe[1] = 2;
1026 swizzle_pipe[2] = 4;
1027 swizzle_pipe[3] = 6;
1028 swizzle_pipe[4] = 1;
1029 swizzle_pipe[5] = 3;
1030 swizzle_pipe[6] = 5;
1031 swizzle_pipe[7] = 7;
1032 }
1033 break;
1034 }
1035
1036 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1037 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1038 cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
1039
1040 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1041
1042 cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
1043 }
1044
1045 return backend_map;
1046}
1047
1048static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
1049 u32 disable_mask_per_se,
1050 u32 max_disable_mask_per_se,
1051 u32 num_shader_engines)
1052{
1053 u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
1054 u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
1055
1056 if (num_shader_engines == 1)
1057 return disable_mask_per_asic;
1058 else if (num_shader_engines == 2)
1059 return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
1060 else
1061 return 0xffffffff;
1062}
1063
1064static void si_tiling_mode_table_init(struct radeon_device *rdev) 870static void si_tiling_mode_table_init(struct radeon_device *rdev)
1065{ 871{
1066 const u32 num_tile_mode_states = 32; 872 const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1562 DRM_ERROR("unknown asic: 0x%x\n", rdev->family); 1368 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
1563} 1369}
1564 1370
1371static void si_select_se_sh(struct radeon_device *rdev,
1372 u32 se_num, u32 sh_num)
1373{
1374 u32 data = INSTANCE_BROADCAST_WRITES;
1375
1376 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1377 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1378 else if (se_num == 0xffffffff)
1379 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1380 else if (sh_num == 0xffffffff)
1381 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1382 else
1383 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1384 WREG32(GRBM_GFX_INDEX, data);
1385}
1386
1387static u32 si_create_bitmask(u32 bit_width)
1388{
1389 u32 i, mask = 0;
1390
1391 for (i = 0; i < bit_width; i++) {
1392 mask <<= 1;
1393 mask |= 1;
1394 }
1395 return mask;
1396}
1397
1398static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
1399{
1400 u32 data, mask;
1401
1402 data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1403 if (data & 1)
1404 data &= INACTIVE_CUS_MASK;
1405 else
1406 data = 0;
1407 data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1408
1409 data >>= INACTIVE_CUS_SHIFT;
1410
1411 mask = si_create_bitmask(cu_per_sh);
1412
1413 return ~data & mask;
1414}
1415
1416static void si_setup_spi(struct radeon_device *rdev,
1417 u32 se_num, u32 sh_per_se,
1418 u32 cu_per_sh)
1419{
1420 int i, j, k;
1421 u32 data, mask, active_cu;
1422
1423 for (i = 0; i < se_num; i++) {
1424 for (j = 0; j < sh_per_se; j++) {
1425 si_select_se_sh(rdev, i, j);
1426 data = RREG32(SPI_STATIC_THREAD_MGMT_3);
1427 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
1428
1429 mask = 1;
1430 for (k = 0; k < 16; k++) {
1431 mask <<= k;
1432 if (active_cu & mask) {
1433 data &= ~mask;
1434 WREG32(SPI_STATIC_THREAD_MGMT_3, data);
1435 break;
1436 }
1437 }
1438 }
1439 }
1440 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1441}
1442
1443static u32 si_get_rb_disabled(struct radeon_device *rdev,
1444 u32 max_rb_num, u32 se_num,
1445 u32 sh_per_se)
1446{
1447 u32 data, mask;
1448
1449 data = RREG32(CC_RB_BACKEND_DISABLE);
1450 if (data & 1)
1451 data &= BACKEND_DISABLE_MASK;
1452 else
1453 data = 0;
1454 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1455
1456 data >>= BACKEND_DISABLE_SHIFT;
1457
1458 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
1459
1460 return data & mask;
1461}
1462
1463static void si_setup_rb(struct radeon_device *rdev,
1464 u32 se_num, u32 sh_per_se,
1465 u32 max_rb_num)
1466{
1467 int i, j;
1468 u32 data, mask;
1469 u32 disabled_rbs = 0;
1470 u32 enabled_rbs = 0;
1471
1472 for (i = 0; i < se_num; i++) {
1473 for (j = 0; j < sh_per_se; j++) {
1474 si_select_se_sh(rdev, i, j);
1475 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1476 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
1477 }
1478 }
1479 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1480
1481 mask = 1;
1482 for (i = 0; i < max_rb_num; i++) {
1483 if (!(disabled_rbs & mask))
1484 enabled_rbs |= mask;
1485 mask <<= 1;
1486 }
1487
1488 for (i = 0; i < se_num; i++) {
1489 si_select_se_sh(rdev, i, 0xffffffff);
1490 data = 0;
1491 for (j = 0; j < sh_per_se; j++) {
1492 switch (enabled_rbs & 3) {
1493 case 1:
1494 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
1495 break;
1496 case 2:
1497 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
1498 break;
1499 case 3:
1500 default:
1501 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
1502 break;
1503 }
1504 enabled_rbs >>= 2;
1505 }
1506 WREG32(PA_SC_RASTER_CONFIG, data);
1507 }
1508 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1509}
1510
1565static void si_gpu_init(struct radeon_device *rdev) 1511static void si_gpu_init(struct radeon_device *rdev)
1566{ 1512{
1567 u32 cc_rb_backend_disable = 0;
1568 u32 cc_gc_shader_array_config;
1569 u32 gb_addr_config = 0; 1513 u32 gb_addr_config = 0;
1570 u32 mc_shared_chmap, mc_arb_ramcfg; 1514 u32 mc_shared_chmap, mc_arb_ramcfg;
1571 u32 gb_backend_map;
1572 u32 cgts_tcc_disable;
1573 u32 sx_debug_1; 1515 u32 sx_debug_1;
1574 u32 gc_user_shader_array_config;
1575 u32 gc_user_rb_backend_disable;
1576 u32 cgts_user_tcc_disable;
1577 u32 hdp_host_path_cntl; 1516 u32 hdp_host_path_cntl;
1578 u32 tmp; 1517 u32 tmp;
1579 int i, j; 1518 int i, j;
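
si_get_cu_enabled() above boils down to: merge the fused and user CU-disable fields, shift the INACTIVE_CUS bits down, and complement within the per-SH CU count. A hedged sketch of that reduction, with the register reads stubbed out, the shift assumed already applied, and the valid-bit check of the original omitted:

#include <stdint.h>

/* closed form of si_create_bitmask(), valid for width < 32 */
static uint32_t low_bits(uint32_t width)
{
        return (1u << width) - 1;
}

/* active-CU mask for one SH: complement of the combined disable
 * bits, clipped to cu_per_sh */
static uint32_t cu_enabled(uint32_t fused_disable, uint32_t user_disable,
                           uint32_t cu_per_sh)
{
        return ~(fused_disable | user_disable) & low_bits(cu_per_sh);
}
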
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
1581 switch (rdev->family) { 1520 switch (rdev->family) {
1582 case CHIP_TAHITI: 1521 case CHIP_TAHITI:
1583 rdev->config.si.max_shader_engines = 2; 1522 rdev->config.si.max_shader_engines = 2;
1584 rdev->config.si.max_pipes_per_simd = 4;
1585 rdev->config.si.max_tile_pipes = 12; 1523 rdev->config.si.max_tile_pipes = 12;
1586 rdev->config.si.max_simds_per_se = 8; 1524 rdev->config.si.max_cu_per_sh = 8;
1525 rdev->config.si.max_sh_per_se = 2;
1587 rdev->config.si.max_backends_per_se = 4; 1526 rdev->config.si.max_backends_per_se = 4;
1588 rdev->config.si.max_texture_channel_caches = 12; 1527 rdev->config.si.max_texture_channel_caches = 12;
1589 rdev->config.si.max_gprs = 256; 1528 rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
1594 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1533 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1595 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1534 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1596 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1535 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1536 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1597 break; 1537 break;
1598 case CHIP_PITCAIRN: 1538 case CHIP_PITCAIRN:
1599 rdev->config.si.max_shader_engines = 2; 1539 rdev->config.si.max_shader_engines = 2;
1600 rdev->config.si.max_pipes_per_simd = 4;
1601 rdev->config.si.max_tile_pipes = 8; 1540 rdev->config.si.max_tile_pipes = 8;
1602 rdev->config.si.max_simds_per_se = 5; 1541 rdev->config.si.max_cu_per_sh = 5;
1542 rdev->config.si.max_sh_per_se = 2;
1603 rdev->config.si.max_backends_per_se = 4; 1543 rdev->config.si.max_backends_per_se = 4;
1604 rdev->config.si.max_texture_channel_caches = 8; 1544 rdev->config.si.max_texture_channel_caches = 8;
1605 rdev->config.si.max_gprs = 256; 1545 rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
1610 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1550 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1611 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1551 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1612 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1552 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1553 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1613 break; 1554 break;
1614 case CHIP_VERDE: 1555 case CHIP_VERDE:
1615 default: 1556 default:
1616 rdev->config.si.max_shader_engines = 1; 1557 rdev->config.si.max_shader_engines = 1;
1617 rdev->config.si.max_pipes_per_simd = 4;
1618 rdev->config.si.max_tile_pipes = 4; 1558 rdev->config.si.max_tile_pipes = 4;
1619 rdev->config.si.max_simds_per_se = 2; 1559 rdev->config.si.max_cu_per_sh = 2;
1560 rdev->config.si.max_sh_per_se = 2;
1620 rdev->config.si.max_backends_per_se = 4; 1561 rdev->config.si.max_backends_per_se = 4;
1621 rdev->config.si.max_texture_channel_caches = 4; 1562 rdev->config.si.max_texture_channel_caches = 4;
1622 rdev->config.si.max_gprs = 256; 1563 rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1627 rdev->config.si.sc_prim_fifo_size_backend = 0x40; 1568 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
1628 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1569 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1629 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1570 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1571 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1630 break; 1572 break;
1631 } 1573 }
1632 1574
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1648 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1590 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1649 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1591 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1650 1592
1651 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
1652 cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1653 cgts_tcc_disable = 0xffff0000;
1654 for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
1655 cgts_tcc_disable &= ~(1 << (16 + i));
1656 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
1657 gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1658 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
1659
1660 rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
1661 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; 1593 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
1662 tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
1663 rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
1664 tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
1665 rdev->config.si.backend_disable_mask_per_asic =
1666 si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
1667 rdev->config.si.num_shader_engines);
1668 rdev->config.si.backend_map =
1669 si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
1670 rdev->config.si.num_backends_per_se *
1671 rdev->config.si.num_shader_engines,
1672 &rdev->config.si.backend_disable_mask_per_asic,
1673 rdev->config.si.num_shader_engines);
1674 tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
1675 rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
1676 rdev->config.si.mem_max_burst_length_bytes = 256; 1594 rdev->config.si.mem_max_burst_length_bytes = 256;
1677 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 1595 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
1678 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 1596 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
1683 rdev->config.si.num_gpus = 1; 1601 rdev->config.si.num_gpus = 1;
1684 rdev->config.si.multi_gpu_tile_size = 64; 1602 rdev->config.si.multi_gpu_tile_size = 64;
1685 1603
1686 gb_addr_config = 0; 1604 /* fix up row size */
1687 switch (rdev->config.si.num_tile_pipes) { 1605 gb_addr_config &= ~ROW_SIZE_MASK;
1688 case 1:
1689 gb_addr_config |= NUM_PIPES(0);
1690 break;
1691 case 2:
1692 gb_addr_config |= NUM_PIPES(1);
1693 break;
1694 case 4:
1695 gb_addr_config |= NUM_PIPES(2);
1696 break;
1697 case 8:
1698 default:
1699 gb_addr_config |= NUM_PIPES(3);
1700 break;
1701 }
1702
1703 tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
1704 gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
1705 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
1706 tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
1707 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
1708 switch (rdev->config.si.num_gpus) {
1709 case 1:
1710 default:
1711 gb_addr_config |= NUM_GPUS(0);
1712 break;
1713 case 2:
1714 gb_addr_config |= NUM_GPUS(1);
1715 break;
1716 case 4:
1717 gb_addr_config |= NUM_GPUS(2);
1718 break;
1719 }
1720 switch (rdev->config.si.multi_gpu_tile_size) {
1721 case 16:
1722 gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
1723 break;
1724 case 32:
1725 default:
1726 gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
1727 break;
1728 case 64:
1729 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1730 break;
1731 case 128:
1732 gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
1733 break;
1734 }
1735 switch (rdev->config.si.mem_row_size_in_kb) { 1606 switch (rdev->config.si.mem_row_size_in_kb) {
1736 case 1: 1607 case 1:
1737 default: 1608 default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
1745 break; 1616 break;
1746 } 1617 }
1747 1618
1748 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
1749 rdev->config.si.num_tile_pipes = (1 << tmp);
1750 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
1751 rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
1752 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
1753 rdev->config.si.num_shader_engines = tmp + 1;
1754 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
1755 rdev->config.si.num_gpus = tmp + 1;
1756 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
1757 rdev->config.si.multi_gpu_tile_size = 1 << tmp;
1758 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
1759 rdev->config.si.mem_row_size_in_kb = 1 << tmp;
1760
1761 gb_backend_map =
1762 si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
1763 rdev->config.si.num_backends_per_se *
1764 rdev->config.si.num_shader_engines,
1765 &rdev->config.si.backend_disable_mask_per_asic,
1766 rdev->config.si.num_shader_engines);
1767
1768 /* setup tiling info dword. gb_addr_config is not adequate since it does 1619 /* setup tiling info dword. gb_addr_config is not adequate since it does
1769 * not have bank info, so create a custom tiling dword. 1620 * not have bank info, so create a custom tiling dword.
1770 * bits 3:0 num_pipes 1621 * bits 3:0 num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
1789 rdev->config.si.tile_config |= (3 << 0); 1640 rdev->config.si.tile_config |= (3 << 0);
1790 break; 1641 break;
1791 } 1642 }
1792 rdev->config.si.tile_config |= 1643 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
1793 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1644 rdev->config.si.tile_config |= 1 << 4;
1645 else
1646 rdev->config.si.tile_config |= 0 << 4;
1794 rdev->config.si.tile_config |= 1647 rdev->config.si.tile_config |=
1795 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 1648 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1796 rdev->config.si.tile_config |= 1649 rdev->config.si.tile_config |=
1797 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 1650 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
1798 1651
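
An illustrative host-side decode of the custom tiling dword assembled above; the field positions come from the shifts in this hunk, and only num_pipes is named by the visible comment, so the remaining labels are inferred:

#include <stdio.h>

static void decode_tile_config(unsigned int tile_config)
{
	printf("num_pipes code:        %u\n", tile_config & 0xf);
	printf("bank code (NOOFBANK):  %u\n", (tile_config >> 4) & 1);
	printf("pipe interleave code:  %u\n", (tile_config >> 8) & 0xf);
	printf("row size code:         %u\n", (tile_config >> 12) & 0xf);
}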
1799 rdev->config.si.backend_map = gb_backend_map;
1800 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1652 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1801 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1653 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1802 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1654 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1803 1655
1804 /* primary versions */ 1656 si_tiling_mode_table_init(rdev);
1805 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1806 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1807 WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
1808
1809 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
1810 1657
1811 /* user versions */ 1658 si_setup_rb(rdev, rdev->config.si.max_shader_engines,
1812 WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1659 rdev->config.si.max_sh_per_se,
1813 WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1660 rdev->config.si.max_backends_per_se);
1814 WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
1815 1661
1816 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 1662 si_setup_spi(rdev, rdev->config.si.max_shader_engines,
1663 rdev->config.si.max_sh_per_se,
1664 rdev->config.si.max_cu_per_sh);
1817 1665
1818 si_tiling_mode_table_init(rdev);
1819 1666
1820 /* set HW defaults for 3D engine */ 1667 /* set HW defaults for 3D engine */
1821 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 1668 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c42dbd6..db4067962868 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
24#ifndef SI_H 24#ifndef SI_H
25#define SI_H 25#define SI_H
26 26
27#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
28
29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31
27#define CG_MULT_THERMAL_STATUS 0x714 32#define CG_MULT_THERMAL_STATUS 0x714
28#define ASIC_MAX_TEMP(x) ((x) << 0) 33#define ASIC_MAX_TEMP(x) ((x) << 0)
29#define ASIC_MAX_TEMP_MASK 0x000001ff 34#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -408,6 +413,12 @@
408#define SOFT_RESET_IA (1 << 15) 413#define SOFT_RESET_IA (1 << 15)
409 414
410#define GRBM_GFX_INDEX 0x802C 415#define GRBM_GFX_INDEX 0x802C
416#define INSTANCE_INDEX(x) ((x) << 0)
417#define SH_INDEX(x) ((x) << 8)
418#define SE_INDEX(x) ((x) << 16)
419#define SH_BROADCAST_WRITES (1 << 29)
420#define INSTANCE_BROADCAST_WRITES (1 << 30)
421#define SE_BROADCAST_WRITES (1 << 31)
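
These fields feed si_select_se_sh(), which the si.c hunks above call on every iteration but which this diff does not show. A plausible shape, consistent with the 0xffffffff broadcast convention used by the callers (sketch only, not necessarily the in-tree body):

static void si_select_se_sh(struct radeon_device *rdev,
			    u32 se_num, u32 sh_num)
{
	u32 data = INSTANCE_BROADCAST_WRITES;

	if (se_num == 0xffffffff && sh_num == 0xffffffff)
		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
	else if (se_num == 0xffffffff)
		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
	else if (sh_num == 0xffffffff)
		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
	else
		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);

	WREG32(GRBM_GFX_INDEX, data);
}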
411 422
412#define GRBM_INT_CNTL 0x8060 423#define GRBM_INT_CNTL 0x8060
413# define RDERR_INT_ENABLE (1 << 0) 424# define RDERR_INT_ENABLE (1 << 0)
@@ -480,6 +491,8 @@
480#define VGT_TF_MEMORY_BASE 0x89B8 491#define VGT_TF_MEMORY_BASE 0x89B8
481 492
482#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc 493#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
494#define INACTIVE_CUS_MASK 0xFFFF0000
495#define INACTIVE_CUS_SHIFT 16
483#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 496#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
484 497
485#define PA_CL_ENHANCE 0x8A14 498#define PA_CL_ENHANCE 0x8A14
@@ -688,6 +701,12 @@
688#define RLC_MC_CNTL 0xC344 701#define RLC_MC_CNTL 0xC344
689#define RLC_UCODE_CNTL 0xC348 702#define RLC_UCODE_CNTL 0xC348
690 703
704#define PA_SC_RASTER_CONFIG 0x28350
705# define RASTER_CONFIG_RB_MAP_0 0
706# define RASTER_CONFIG_RB_MAP_1 1
707# define RASTER_CONFIG_RB_MAP_2 2
708# define RASTER_CONFIG_RB_MAP_3 3
709
691#define VGT_EVENT_INITIATOR 0x28a90 710#define VGT_EVENT_INITIATOR 0x28a90
692# define SAMPLE_STREAMOUTSTATS1 (1 << 0) 711# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
693# define SAMPLE_STREAMOUTSTATS2 (2 << 0) 712# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 36792bd4da77..b67cfcaa661f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1834 spin_unlock(&glob->lru_lock); 1834 spin_unlock(&glob->lru_lock);
1835 (void) ttm_bo_cleanup_refs(bo, false, false, false); 1835 (void) ttm_bo_cleanup_refs(bo, false, false, false);
1836 kref_put(&bo->list_kref, ttm_bo_release_list); 1836 kref_put(&bo->list_kref, ttm_bo_release_list);
1837 spin_lock(&glob->lru_lock);
1837 continue; 1838 continue;
1838 } 1839 }
1839 1840
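
The one-line fix restores the loop invariant that lru_lock is held at the top of each iteration: ttm_bo_cleanup_refs() can sleep, so the lock is dropped around it and must be re-taken before the continue. The general shape of the pattern (schematic; pick_next_bo, bo_is_dying and cleanup_refs are hypothetical stand-ins):

spin_lock(&glob->lru_lock);
while ((bo = pick_next_bo(glob)) != NULL) {
	if (bo_is_dying(bo)) {
		spin_unlock(&glob->lru_lock);	/* cleanup may sleep */
		cleanup_refs(bo);
		spin_lock(&glob->lru_lock);	/* re-take before continuing */
		continue;			/* re-scan under the lock */
	}
	/* normal swap-out path, still under the lock */
}
spin_unlock(&glob->lru_lock);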
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee39b0c5..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
156 if (!fb->active_16) 156 if (!fb->active_16)
157 return 0; 157 return 0;
158 158
159 if (!fb->obj->vmapping) 159 if (!fb->obj->vmapping) {
160 udl_gem_vmap(fb->obj); 160 ret = udl_gem_vmap(fb->obj);
161 if (ret == -ENOMEM) {
162 DRM_ERROR("failed to vmap fb\n");
163 return 0;
164 }
165 if (!fb->obj->vmapping) {
 166 			DRM_ERROR("vmapping missing after vmap\n");
167 return 0;
168 }
169 }
161 170
162 start_cycles = get_cycles(); 171 start_cycles = get_cycles();
163 172
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 97acc9c6c95b..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
180 int page_count = obj->base.size / PAGE_SIZE; 180 int page_count = obj->base.size / PAGE_SIZE;
181 int ret; 181 int ret;
182 182
183 if (obj->base.import_attach) {
184 ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
185 0, obj->base.size, DMA_BIDIRECTIONAL);
186 if (ret)
187 return -EINVAL;
188
189 obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
190 if (!obj->vmapping)
191 return -ENOMEM;
192 return 0;
193 }
194
183 ret = udl_gem_get_pages(obj, GFP_KERNEL); 195 ret = udl_gem_get_pages(obj, GFP_KERNEL);
184 if (ret) 196 if (ret)
185 return ret; 197 return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
192 204
193void udl_gem_vunmap(struct udl_gem_object *obj) 205void udl_gem_vunmap(struct udl_gem_object *obj)
194{ 206{
207 if (obj->base.import_attach) {
208 dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
209 dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
210 obj->base.size, DMA_BIDIRECTIONAL);
211 return;
212 }
213
195 if (obj->vmapping) 214 if (obj->vmapping)
196 vunmap(obj->vmapping); 215 vunmap(obj->vmapping);
197 216
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
202{ 221{
203 struct udl_gem_object *obj = to_udl_bo(gem_obj); 222 struct udl_gem_object *obj = to_udl_bo(gem_obj);
204 223
205 if (gem_obj->import_attach)
206 drm_prime_gem_destroy(gem_obj, obj->sg);
207
208 if (obj->vmapping) 224 if (obj->vmapping)
209 udl_gem_vunmap(obj); 225 udl_gem_vunmap(obj);
210 226
227 if (gem_obj->import_attach)
228 drm_prime_gem_destroy(gem_obj, obj->sg);
229
211 if (obj->pages) 230 if (obj->pages)
212 udl_gem_put_pages(obj); 231 udl_gem_put_pages(obj);
213 232
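
Taken together, the udl_gem.c hunks bracket CPU access to imported dma-bufs symmetrically: begin_cpu_access before dma_buf_vmap, dma_buf_vunmap before end_cpu_access, and, per the reordering in udl_gem_free_object, the unmap before the PRIME attachment is destroyed. A caller-side sketch of the pairing, with error handling trimmed:

#include <linux/dma-buf.h>

static void *map_imported(struct dma_buf *buf, size_t size)
{
	if (dma_buf_begin_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL))
		return NULL;
	return dma_buf_vmap(buf);		/* NULL on failure */
}

static void unmap_imported(struct dma_buf *buf, size_t size, void *vaddr)
{
	dma_buf_vunmap(buf, vaddr);
	dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
}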
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5cd2fb..21ee78226560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
66 cmd += sizeof(remap_cmd) / sizeof(uint32); 66 cmd += sizeof(remap_cmd) / sizeof(uint32);
67 67
68 for (i = 0; i < num_pages; ++i) { 68 for (i = 0; i < num_pages; ++i) {
69 if (VMW_PPN_SIZE > 4) 69 if (VMW_PPN_SIZE <= 4)
70 *cmd = page_to_pfn(*pages++); 70 *cmd = page_to_pfn(*pages++);
71 else 71 else
72 *((uint64_t *)cmd) = page_to_pfn(*pages++); 72 *((uint64_t *)cmd) = page_to_pfn(*pages++);