aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-06-03 10:19:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-06-03 10:19:45 -0400
commit1067b6c2bea7fd2cc9da290d865ab3f3b91c8130 (patch)
tree5b20d5fbe9f9a2b3cfbb794018d74606f5031a88 /drivers/gpu
parenta652883a244901742d6c9733a9eebdf72e3114ea (diff)
parentd8dcaa1dc50f5aecd38d34180cd99d6af8566c88 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (41 commits) drm/radeon/kms: make sure display hw is disabled when suspending drm/vmwgfx: Allow userspace to change default layout. Bump minor. drm/vmwgfx: Fix framebuffer modesetting drm/vmwgfx: Fix vga save / restore with display topology. vgaarb: use MIT license vgaarb: convert pr_devel() to pr_debug() drm: fix typos in Linux DRM Developer's Guide drm/radeon/kms/pm: voltage fixes drm/radeon/kms/pm: radeon_set_power_state fixes drm/radeon/kms/pm: patch default power state with default clocks/voltages on r6xx+ drm/radeon/kms/pm: enable SetVoltage on r7xx/evergreen drm/radeon/kms/pm: add support for SetVoltage cmd table (V2) drm/radeon/kms/evergreen: add initial CS parser drm/kms: disable/enable poll around switcheroo on/off drm/nouveau: fixup confusion over which handle the DSM is hanging off. drm/nouveau: attempt to get bios from ACPI v3 drm/nv50: cast IGP memory location to u64 before shifting drm/ttm: Fix ttm_page_alloc.c drm/ttm: Fix cached TTM page allocation. drm/vmwgfx: Remove some leftover debug messages. ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c28
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c7
-rw-r--r--drivers/gpu/drm/radeon/Makefile7
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1356
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h464
-rw-r--r--drivers/gpu/drm/radeon/r600.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c75
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen611
-rw-r--r--drivers/gpu/drm/radeon/rs600.c3
-rw-r--r--drivers/gpu/drm/radeon/rv770.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c64
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c173
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c203
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c189
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c61
43 files changed, 3481 insertions, 315 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 764401951041..9b2a54117c91 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
860 } 860 }
861} 861}
862 862
863void drm_kms_helper_poll_init(struct drm_device *dev) 863void drm_kms_helper_poll_disable(struct drm_device *dev)
864{
865 if (!dev->mode_config.poll_enabled)
866 return;
867 delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
868}
869EXPORT_SYMBOL(drm_kms_helper_poll_disable);
870
871void drm_kms_helper_poll_enable(struct drm_device *dev)
864{ 872{
865 struct drm_connector *connector;
866 bool poll = false; 873 bool poll = false;
874 struct drm_connector *connector;
867 int ret; 875 int ret;
868 876
869 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 877 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
870 if (connector->polled) 878 if (connector->polled)
871 poll = true; 879 poll = true;
872 } 880 }
873 slow_work_register_user(THIS_MODULE);
874 delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
875 &output_poll_ops);
876 881
877 if (poll) { 882 if (poll) {
878 ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); 883 ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
880 DRM_ERROR("delayed enqueue failed %d\n", ret); 885 DRM_ERROR("delayed enqueue failed %d\n", ret);
881 } 886 }
882} 887}
888EXPORT_SYMBOL(drm_kms_helper_poll_enable);
889
890void drm_kms_helper_poll_init(struct drm_device *dev)
891{
892 slow_work_register_user(THIS_MODULE);
893 delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
894 &output_poll_ops);
895 dev->mode_config.poll_enabled = true;
896
897 drm_kms_helper_poll_enable(dev);
898}
883EXPORT_SYMBOL(drm_kms_helper_poll_init); 899EXPORT_SYMBOL(drm_kms_helper_poll_init);
884 900
885void drm_kms_helper_poll_fini(struct drm_device *dev) 901void drm_kms_helper_poll_fini(struct drm_device *dev)
886{ 902{
887 delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); 903 drm_kms_helper_poll_disable(dev);
888 slow_work_unregister_user(THIS_MODULE); 904 slow_work_unregister_user(THIS_MODULE);
889} 905}
890EXPORT_SYMBOL(drm_kms_helper_poll_fini); 906EXPORT_SYMBOL(drm_kms_helper_poll_fini);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 84ce95602f00..b2ebf02e4f8a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1320,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1320 struct drm_device *dev = pci_get_drvdata(pdev); 1320 struct drm_device *dev = pci_get_drvdata(pdev);
1321 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1321 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1322 if (state == VGA_SWITCHEROO_ON) { 1322 if (state == VGA_SWITCHEROO_ON) {
1323 printk(KERN_INFO "i915: switched off\n"); 1323 printk(KERN_INFO "i915: switched on\n");
1324 /* i915 resume handler doesn't set to D0 */ 1324 /* i915 resume handler doesn't set to D0 */
1325 pci_set_power_state(dev->pdev, PCI_D0); 1325 pci_set_power_state(dev->pdev, PCI_D0);
1326 i915_resume(dev); 1326 i915_resume(dev);
1327 drm_kms_helper_poll_enable(dev);
1327 } else { 1328 } else {
1328 printk(KERN_ERR "i915: switched off\n"); 1329 printk(KERN_ERR "i915: switched off\n");
1330 drm_kms_helper_poll_disable(dev);
1329 i915_suspend(dev, pmm); 1331 i915_suspend(dev, pmm);
1330 } 1332 }
1331} 1333}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index e13f6af0037a..d4bcca8a5133 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -34,7 +34,7 @@
34static struct nouveau_dsm_priv { 34static struct nouveau_dsm_priv {
35 bool dsm_detected; 35 bool dsm_detected;
36 acpi_handle dhandle; 36 acpi_handle dhandle;
37 acpi_handle dsm_handle; 37 acpi_handle rom_handle;
38} nouveau_dsm_priv; 38} nouveau_dsm_priv;
39 39
40static const char nouveau_dsm_muid[] = { 40static const char nouveau_dsm_muid[] = {
@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
107static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) 107static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
108{ 108{
109 if (id == VGA_SWITCHEROO_IGD) 109 if (id == VGA_SWITCHEROO_IGD)
110 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); 110 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
111 else 111 else
112 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); 112 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
113} 113}
114 114
115static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, 115static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
118 if (id == VGA_SWITCHEROO_IGD) 118 if (id == VGA_SWITCHEROO_IGD)
119 return 0; 119 return 0;
120 120
121 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); 121 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
122} 122}
123 123
124static int nouveau_dsm_init(void) 124static int nouveau_dsm_init(void)
@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
151 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 151 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
152 if (!dhandle) 152 if (!dhandle)
153 return false; 153 return false;
154
154 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); 155 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
155 if (ACPI_FAILURE(status)) { 156 if (ACPI_FAILURE(status)) {
156 return false; 157 return false;
157 } 158 }
158 159
159 ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, 160 ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
160 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); 161 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
161 if (ret < 0) 162 if (ret < 0)
162 return false; 163 return false;
163 164
164 nouveau_dsm_priv.dhandle = dhandle; 165 nouveau_dsm_priv.dhandle = dhandle;
165 nouveau_dsm_priv.dsm_handle = nvidia_handle;
166 return true; 166 return true;
167} 167}
168 168
@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void)
173 struct pci_dev *pdev = NULL; 173 struct pci_dev *pdev = NULL;
174 int has_dsm = 0; 174 int has_dsm = 0;
175 int vga_count = 0; 175 int vga_count = 0;
176
176 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 177 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
177 vga_count++; 178 vga_count++;
178 179
@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void)
180 } 181 }
181 182
182 if (vga_count == 2 && has_dsm) { 183 if (vga_count == 2 && has_dsm) {
183 acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); 184 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
184 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 185 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
185 acpi_method_name); 186 acpi_method_name);
186 nouveau_dsm_priv.dsm_detected = true; 187 nouveau_dsm_priv.dsm_detected = true;
@@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void)
204{ 205{
205 vga_switcheroo_unregister_handler(); 206 vga_switcheroo_unregister_handler();
206} 207}
208
209/* retrieve the ROM in 4k blocks */
210static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
211 int offset, int len)
212{
213 acpi_status status;
214 union acpi_object rom_arg_elements[2], *obj;
215 struct acpi_object_list rom_arg;
216 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
217
218 rom_arg.count = 2;
219 rom_arg.pointer = &rom_arg_elements[0];
220
221 rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
222 rom_arg_elements[0].integer.value = offset;
223
224 rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
225 rom_arg_elements[1].integer.value = len;
226
227 status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
228 if (ACPI_FAILURE(status)) {
229 printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
230 return -ENODEV;
231 }
232 obj = (union acpi_object *)buffer.pointer;
233 memcpy(bios+offset, obj->buffer.pointer, len);
234 kfree(buffer.pointer);
235 return len;
236}
237
238bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
239{
240 acpi_status status;
241 acpi_handle dhandle, rom_handle;
242
243 if (!nouveau_dsm_priv.dsm_detected)
244 return false;
245
246 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
247 if (!dhandle)
248 return false;
249
250 status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
251 if (ACPI_FAILURE(status))
252 return false;
253
254 nouveau_dsm_priv.rom_handle = rom_handle;
255 return true;
256}
257
258int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
259{
260 return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
261}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e7e69ccce5c9..9ba2deaadcc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -178,6 +178,25 @@ out:
178 pci_disable_rom(dev->pdev); 178 pci_disable_rom(dev->pdev);
179} 179}
180 180
181static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
182{
183 int i;
184 int ret;
185 int size = 64 * 1024;
186
187 if (!nouveau_acpi_rom_supported(dev->pdev))
188 return;
189
190 for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
191 ret = nouveau_acpi_get_bios_chunk(data,
192 (i * ROM_BIOS_PAGE),
193 ROM_BIOS_PAGE);
194 if (ret <= 0)
195 break;
196 }
197 return;
198}
199
181struct methods { 200struct methods {
182 const char desc[8]; 201 const char desc[8];
183 void (*loadbios)(struct drm_device *, uint8_t *); 202 void (*loadbios)(struct drm_device *, uint8_t *);
@@ -191,6 +210,7 @@ static struct methods nv04_methods[] = {
191}; 210};
192 211
193static struct methods nv50_methods[] = { 212static struct methods nv50_methods[] = {
213 { "ACPI", load_vbios_acpi, true },
194 { "PRAMIN", load_vbios_pramin, true }, 214 { "PRAMIN", load_vbios_pramin, true },
195 { "PROM", load_vbios_prom, false }, 215 { "PROM", load_vbios_prom, false },
196 { "PCIROM", load_vbios_pci, true }, 216 { "PCIROM", load_vbios_pci, true },
@@ -2807,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2807 2827
2808 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); 2828 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
2809 2829
2810 nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); 2830 BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
2831 offset, gpio->tag, gpio->state_default);
2832 if (bios->execute)
2833 nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
2811 2834
2812 /* The NVIDIA binary driver doesn't appear to actually do 2835 /* The NVIDIA binary driver doesn't appear to actually do
2813 * any of this, my VBIOS does however. 2836 * any of this, my VBIOS does however.
@@ -5533,12 +5556,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5533 entry->bus = (conn >> 16) & 0xf; 5556 entry->bus = (conn >> 16) & 0xf;
5534 entry->location = (conn >> 20) & 0x3; 5557 entry->location = (conn >> 20) & 0x3;
5535 entry->or = (conn >> 24) & 0xf; 5558 entry->or = (conn >> 24) & 0xf;
5536 /*
5537 * Normal entries consist of a single bit, but dual link has the
5538 * next most significant bit set too
5539 */
5540 entry->duallink_possible =
5541 ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
5542 5559
5543 switch (entry->type) { 5560 switch (entry->type) {
5544 case OUTPUT_ANALOG: 5561 case OUTPUT_ANALOG:
@@ -5622,6 +5639,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5622 break; 5639 break;
5623 } 5640 }
5624 5641
5642 if (dcb->version < 0x40) {
5643 /* Normal entries consist of a single bit, but dual link has
5644 * the next most significant bit set too
5645 */
5646 entry->duallink_possible =
5647 ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
5648 } else {
5649 entry->duallink_possible = (entry->sorconf.link == 3);
5650 }
5651
5625 /* unsure what DCB version introduces this, 3.0? */ 5652 /* unsure what DCB version introduces this, 3.0? */
5626 if (conf & 0x100000) 5653 if (conf & 0x100000)
5627 entry->i2c_upper_default = true; 5654 entry->i2c_upper_default = true;
@@ -6205,6 +6232,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
6205 nouveau_i2c_fini(dev, entry); 6232 nouveau_i2c_fini(dev, entry);
6206} 6233}
6207 6234
6235static bool
6236nouveau_bios_posted(struct drm_device *dev)
6237{
6238 struct drm_nouveau_private *dev_priv = dev->dev_private;
6239 bool was_locked;
6240 unsigned htotal;
6241
6242 if (dev_priv->chipset >= NV_50) {
6243 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6244 NVReadVgaCrtc(dev, 0, 0x1a) == 0)
6245 return false;
6246 return true;
6247 }
6248
6249 was_locked = NVLockVgaCrtcs(dev, false);
6250 htotal = NVReadVgaCrtc(dev, 0, 0x06);
6251 htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
6252 htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
6253 htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
6254 htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
6255 NVLockVgaCrtcs(dev, was_locked);
6256 return (htotal != 0);
6257}
6258
6208int 6259int
6209nouveau_bios_init(struct drm_device *dev) 6260nouveau_bios_init(struct drm_device *dev)
6210{ 6261{
@@ -6239,11 +6290,9 @@ nouveau_bios_init(struct drm_device *dev)
6239 bios->execute = false; 6290 bios->execute = false;
6240 6291
6241 /* ... unless card isn't POSTed already */ 6292 /* ... unless card isn't POSTed already */
6242 if (dev_priv->card_type >= NV_10 && 6293 if (!nouveau_bios_posted(dev)) {
6243 NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6244 NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
6245 NV_INFO(dev, "Adaptor not initialised\n"); 6294 NV_INFO(dev, "Adaptor not initialised\n");
6246 if (dev_priv->card_type < NV_50) { 6295 if (dev_priv->card_type < NV_40) {
6247 NV_ERROR(dev, "Unable to POST this chipset\n"); 6296 NV_ERROR(dev, "Unable to POST this chipset\n");
6248 return -ENODEV; 6297 return -ENODEV;
6249 } 6298 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 266b0ff441af..149ed224c3cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -432,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector,
432} 432}
433 433
434static struct drm_display_mode * 434static struct drm_display_mode *
435nouveau_connector_native_mode(struct nouveau_connector *connector) 435nouveau_connector_native_mode(struct drm_connector *connector)
436{ 436{
437 struct drm_device *dev = connector->base.dev; 437 struct drm_connector_helper_funcs *helper = connector->helper_private;
438 struct nouveau_connector *nv_connector = nouveau_connector(connector);
439 struct drm_device *dev = connector->dev;
438 struct drm_display_mode *mode, *largest = NULL; 440 struct drm_display_mode *mode, *largest = NULL;
439 int high_w = 0, high_h = 0, high_v = 0; 441 int high_w = 0, high_h = 0, high_v = 0;
440 442
441 /* Use preferred mode if there is one.. */ 443 list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
442 list_for_each_entry(mode, &connector->base.probed_modes, head) { 444 if (helper->mode_valid(connector, mode) != MODE_OK)
445 continue;
446
447 /* Use preferred mode if there is one.. */
443 if (mode->type & DRM_MODE_TYPE_PREFERRED) { 448 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
444 NV_DEBUG_KMS(dev, "native mode from preferred\n"); 449 NV_DEBUG_KMS(dev, "native mode from preferred\n");
445 return drm_mode_duplicate(dev, mode); 450 return drm_mode_duplicate(dev, mode);
446 } 451 }
447 }
448 452
449 /* Otherwise, take the resolution with the largest width, then height, 453 /* Otherwise, take the resolution with the largest width, then
450 * then vertical refresh 454 * height, then vertical refresh
451 */ 455 */
452 list_for_each_entry(mode, &connector->base.probed_modes, head) {
453 if (mode->hdisplay < high_w) 456 if (mode->hdisplay < high_w)
454 continue; 457 continue;
455 458
@@ -553,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
553 */ 556 */
554 if (!nv_connector->native_mode) 557 if (!nv_connector->native_mode)
555 nv_connector->native_mode = 558 nv_connector->native_mode =
556 nouveau_connector_native_mode(nv_connector); 559 nouveau_connector_native_mode(connector);
557 if (ret == 0 && nv_connector->native_mode) { 560 if (ret == 0 && nv_connector->native_mode) {
558 struct drm_display_mode *mode; 561 struct drm_display_mode *mode;
559 562
@@ -584,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
584 587
585 switch (nv_encoder->dcb->type) { 588 switch (nv_encoder->dcb->type) {
586 case OUTPUT_LVDS: 589 case OUTPUT_LVDS:
587 BUG_ON(!nv_connector->native_mode); 590 if (nv_connector->native_mode &&
588 if (mode->hdisplay > nv_connector->native_mode->hdisplay || 591 (mode->hdisplay > nv_connector->native_mode->hdisplay ||
589 mode->vdisplay > nv_connector->native_mode->vdisplay) 592 mode->vdisplay > nv_connector->native_mode->vdisplay))
590 return MODE_PANEL; 593 return MODE_PANEL;
591 594
592 min_clock = 0; 595 min_clock = 0;
@@ -594,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
594 break; 597 break;
595 case OUTPUT_TMDS: 598 case OUTPUT_TMDS:
596 if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || 599 if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
597 (dev_priv->card_type < NV_50 && 600 !nv_encoder->dcb->duallink_possible)
598 !nv_encoder->dcb->duallink_possible))
599 max_clock = 165000; 601 max_clock = 165000;
600 else 602 else
601 max_clock = 330000; 603 max_clock = 330000;
@@ -729,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
729 if (ret == 0) 731 if (ret == 0)
730 goto out; 732 goto out;
731 nv_connector->detected_encoder = nv_encoder; 733 nv_connector->detected_encoder = nv_encoder;
732 nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); 734 nv_connector->native_mode = nouveau_connector_native_mode(connector);
733 list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) 735 list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
734 drm_mode_remove(connector, mode); 736 drm_mode_remove(connector, mode);
735 737
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 49fa7b2d257e..cb1ce2a09162 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -40,6 +40,8 @@ struct nouveau_crtc {
40 int sharpness; 40 int sharpness;
41 int last_dpms; 41 int last_dpms;
42 42
43 int cursor_saved_x, cursor_saved_y;
44
43 struct { 45 struct {
44 int cpp; 46 int cpp;
45 bool blanked; 47 bool blanked;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index c6079e36669d..273770432298 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
175 nouveau_bo_unpin(nouveau_fb->nvbo); 175 nouveau_bo_unpin(nouveau_fb->nvbo);
176 } 176 }
177 177
178 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
179 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
180
181 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
182 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
183 }
184
178 NV_INFO(dev, "Evicting buffers...\n"); 185 NV_INFO(dev, "Evicting buffers...\n");
179 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); 186 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
180 187
@@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev)
314 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); 321 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
315 } 322 }
316 323
324 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
325 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
326 int ret;
327
328 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
329 if (!ret)
330 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
331 if (ret)
332 NV_ERROR(dev, "Could not pin/map cursor.\n");
333 }
334
317 if (dev_priv->card_type < NV_50) { 335 if (dev_priv->card_type < NV_50) {
318 nv04_display_restore(dev); 336 nv04_display_restore(dev);
319 NVLockVgaCrtcs(dev, false); 337 NVLockVgaCrtcs(dev, false);
320 } else 338 } else
321 nv50_display_init(dev); 339 nv50_display_init(dev);
322 340
341 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
342 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
343
344 nv_crtc->cursor.set_offset(nv_crtc,
345 nv_crtc->cursor.nvbo->bo.offset -
346 dev_priv->vm_vram_base);
347
348 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
349 nv_crtc->cursor_saved_y);
350 }
351
323 /* Force CLUT to get re-loaded during modeset */ 352 /* Force CLUT to get re-loaded during modeset */
324 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 353 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
325 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 354 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5b134438effe..c69719106489 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *);
851extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); 851extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
852 852
853/* nouveau_acpi.c */ 853/* nouveau_acpi.c */
854#define ROM_BIOS_PAGE 4096
854#if defined(CONFIG_ACPI) 855#if defined(CONFIG_ACPI)
855void nouveau_register_dsm_handler(void); 856void nouveau_register_dsm_handler(void);
856void nouveau_unregister_dsm_handler(void); 857void nouveau_unregister_dsm_handler(void);
858int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
859bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
857#else 860#else
858static inline void nouveau_register_dsm_handler(void) {} 861static inline void nouveau_register_dsm_handler(void) {}
859static inline void nouveau_unregister_dsm_handler(void) {} 862static inline void nouveau_unregister_dsm_handler(void) {}
863static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
864static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
860#endif 865#endif
861 866
862/* nouveau_backlight.c */ 867/* nouveau_backlight.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 775a7017af64..c1fd42b0dad1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev)
540 dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); 540 dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
541 dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; 541 dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
542 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) 542 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
543 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; 543 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
544 dev_priv->vram_sys_base <<= 12;
544 } 545 }
545 546
546 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); 547 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e632339c323e..147e59c40151 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,12 +376,15 @@ out_err:
376static void nouveau_switcheroo_set_state(struct pci_dev *pdev, 376static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
377 enum vga_switcheroo_state state) 377 enum vga_switcheroo_state state)
378{ 378{
379 struct drm_device *dev = pci_get_drvdata(pdev);
379 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 380 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
380 if (state == VGA_SWITCHEROO_ON) { 381 if (state == VGA_SWITCHEROO_ON) {
381 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 382 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
382 nouveau_pci_resume(pdev); 383 nouveau_pci_resume(pdev);
384 drm_kms_helper_poll_enable(dev);
383 } else { 385 } else {
384 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); 386 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
387 drm_kms_helper_poll_disable(dev);
385 nouveau_pci_suspend(pdev, pmm); 388 nouveau_pci_suspend(pdev, pmm);
386 } 389 }
387} 390}
@@ -913,6 +916,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
913 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 916 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
914 getparam->value = dev_priv->vm_vram_base; 917 getparam->value = dev_priv->vm_vram_base;
915 break; 918 break;
919 case NOUVEAU_GETPARAM_PTIMER_TIME:
920 getparam->value = dev_priv->engine.timer.read(dev);
921 break;
916 case NOUVEAU_GETPARAM_GRAPH_UNITS: 922 case NOUVEAU_GETPARAM_GRAPH_UNITS:
917 /* NV40 and NV50 versions are quite different, but register 923 /* NV40 and NV50 versions are quite different, but register
918 * address is the same. User is supposed to know the card 924 * address is the same. User is supposed to know the card
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 89a91b9d8b25..aaf3de3bc816 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
20static void 20static void
21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) 21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
22{ 22{
23 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
23 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, 24 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
24 NV_PRAMDAC_CU_START_POS, 25 NV_PRAMDAC_CU_START_POS,
25 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | 26 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 753e723adb3a..03ad7ab14f09 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
107{ 107{
108 struct drm_device *dev = nv_crtc->base.dev; 108 struct drm_device *dev = nv_crtc->base.dev;
109 109
110 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
110 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), 111 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
111 ((y & 0xFFFF) << 16) | (x & 0xFFFF)); 112 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
112 /* Needed to make the cursor move. */ 113 /* Needed to make the cursor move. */
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b11eaf9c5c7c..812778db76ac 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
274int 274int
275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) 275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
276{ 276{
277 struct drm_nouveau_private *dev_priv = dev->dev_private;
278 struct nouveau_encoder *nv_encoder = NULL; 277 struct nouveau_encoder *nv_encoder = NULL;
279 struct drm_encoder *encoder; 278 struct drm_encoder *encoder;
280 bool dum; 279 bool dum;
@@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
324 int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); 323 int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
325 uint32_t tmp; 324 uint32_t tmp;
326 325
327 if (dev_priv->chipset < 0x90 || 326 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
328 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
329 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
330 else
331 tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
332 327
333 switch ((tmp & 0x00000f00) >> 8) { 328 switch ((tmp & 0x00000f00) >> 8) {
334 case 8: 329 case 8:
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3c91312dea9a..84b1f2729d43 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
33$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable 33$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
34 $(call if_changed,mkregtable) 34 $(call if_changed,mkregtable)
35 35
36$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
37 $(call if_changed,mkregtable)
38
36$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h 39$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
37 40
38$(obj)/r200.o: $(obj)/r200_reg_safe.h 41$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
47 50
48$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h 51$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
49 52
53$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
54
50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 55radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
51 radeon_irq.o r300_cmdbuf.o r600_cp.o 56 radeon_irq.o r300_cmdbuf.o r600_cp.o
52# add KMS driver 57# add KMS driver
@@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
60 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 65 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
61 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 66 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
62 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 67 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
63 evergreen.o 68 evergreen.o evergreen_cs.o
64 69
65radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 70radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
66radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 71radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8c8e4d3cbaa3..0440c0939bdd 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,7 +41,12 @@ void evergreen_fini(struct radeon_device *rdev);
41 41
42void evergreen_pm_misc(struct radeon_device *rdev) 42void evergreen_pm_misc(struct radeon_device *rdev)
43{ 43{
44 int requested_index = rdev->pm.requested_power_state_index;
45 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
46 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
44 47
48 if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
49 radeon_atom_set_voltage(rdev, voltage->voltage);
45} 50}
46 51
47void evergreen_pm_prepare(struct radeon_device *rdev) 52void evergreen_pm_prepare(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 000000000000..64516b950891
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,1356 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon.h"
30#include "evergreend.h"
31#include "evergreen_reg_safe.h"
32
33static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
34 struct radeon_cs_reloc **cs_reloc);
35
36struct evergreen_cs_track {
37 u32 group_size;
38 u32 nbanks;
39 u32 npipes;
40 /* value we track */
41 u32 nsamples;
42 u32 cb_color_base_last[12];
43 struct radeon_bo *cb_color_bo[12];
44 u32 cb_color_bo_offset[12];
45 struct radeon_bo *cb_color_fmask_bo[8];
46 struct radeon_bo *cb_color_cmask_bo[8];
47 u32 cb_color_info[12];
48 u32 cb_color_view[12];
49 u32 cb_color_pitch_idx[12];
50 u32 cb_color_slice_idx[12];
51 u32 cb_color_dim_idx[12];
52 u32 cb_color_dim[12];
53 u32 cb_color_pitch[12];
54 u32 cb_color_slice[12];
55 u32 cb_color_cmask_slice[8];
56 u32 cb_color_fmask_slice[8];
57 u32 cb_target_mask;
58 u32 cb_shader_mask;
59 u32 vgt_strmout_config;
60 u32 vgt_strmout_buffer_config;
61 u32 db_depth_control;
62 u32 db_depth_view;
63 u32 db_depth_size;
64 u32 db_depth_size_idx;
65 u32 db_z_info;
66 u32 db_z_idx;
67 u32 db_z_read_offset;
68 u32 db_z_write_offset;
69 struct radeon_bo *db_z_read_bo;
70 struct radeon_bo *db_z_write_bo;
71 u32 db_s_info;
72 u32 db_s_idx;
73 u32 db_s_read_offset;
74 u32 db_s_write_offset;
75 struct radeon_bo *db_s_read_bo;
76 struct radeon_bo *db_s_write_bo;
77};
78
79static void evergreen_cs_track_init(struct evergreen_cs_track *track)
80{
81 int i;
82
83 for (i = 0; i < 8; i++) {
84 track->cb_color_fmask_bo[i] = NULL;
85 track->cb_color_cmask_bo[i] = NULL;
86 track->cb_color_cmask_slice[i] = 0;
87 track->cb_color_fmask_slice[i] = 0;
88 }
89
90 for (i = 0; i < 12; i++) {
91 track->cb_color_base_last[i] = 0;
92 track->cb_color_bo[i] = NULL;
93 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
94 track->cb_color_info[i] = 0;
95 track->cb_color_view[i] = 0;
96 track->cb_color_pitch_idx[i] = 0;
97 track->cb_color_slice_idx[i] = 0;
98 track->cb_color_dim[i] = 0;
99 track->cb_color_pitch[i] = 0;
100 track->cb_color_slice[i] = 0;
101 track->cb_color_dim[i] = 0;
102 }
103 track->cb_target_mask = 0xFFFFFFFF;
104 track->cb_shader_mask = 0xFFFFFFFF;
105
106 track->db_depth_view = 0xFFFFC000;
107 track->db_depth_size = 0xFFFFFFFF;
108 track->db_depth_size_idx = 0;
109 track->db_depth_control = 0xFFFFFFFF;
110 track->db_z_info = 0xFFFFFFFF;
111 track->db_z_idx = 0xFFFFFFFF;
112 track->db_z_read_offset = 0xFFFFFFFF;
113 track->db_z_write_offset = 0xFFFFFFFF;
114 track->db_z_read_bo = NULL;
115 track->db_z_write_bo = NULL;
116 track->db_s_info = 0xFFFFFFFF;
117 track->db_s_idx = 0xFFFFFFFF;
118 track->db_s_read_offset = 0xFFFFFFFF;
119 track->db_s_write_offset = 0xFFFFFFFF;
120 track->db_s_read_bo = NULL;
121 track->db_s_write_bo = NULL;
122}
123
124static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
125{
126 /* XXX fill in */
127 return 0;
128}
129
130static int evergreen_cs_track_check(struct radeon_cs_parser *p)
131{
132 struct evergreen_cs_track *track = p->track;
133
134 /* we don't support stream out buffer yet */
135 if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
136 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
137 return -EINVAL;
138 }
139
140 /* XXX fill in */
141 return 0;
142}
143
144/**
145 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
146 * @parser: parser structure holding parsing context.
147 * @pkt: where to store packet informations
148 *
149 * Assume that chunk_ib_index is properly set. Will return -EINVAL
150 * if packet is bigger than remaining ib size. or if packets is unknown.
151 **/
152int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
153 struct radeon_cs_packet *pkt,
154 unsigned idx)
155{
156 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
157 uint32_t header;
158
159 if (idx >= ib_chunk->length_dw) {
160 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
161 idx, ib_chunk->length_dw);
162 return -EINVAL;
163 }
164 header = radeon_get_ib_value(p, idx);
165 pkt->idx = idx;
166 pkt->type = CP_PACKET_GET_TYPE(header);
167 pkt->count = CP_PACKET_GET_COUNT(header);
168 pkt->one_reg_wr = 0;
169 switch (pkt->type) {
170 case PACKET_TYPE0:
171 pkt->reg = CP_PACKET0_GET_REG(header);
172 break;
173 case PACKET_TYPE3:
174 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
175 break;
176 case PACKET_TYPE2:
177 pkt->count = -1;
178 break;
179 default:
180 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
181 return -EINVAL;
182 }
183 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
184 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
185 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
186 return -EINVAL;
187 }
188 return 0;
189}
190
191/**
192 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
193 * @parser: parser structure holding parsing context.
194 * @data: pointer to relocation data
195 * @offset_start: starting offset
196 * @offset_mask: offset mask (to align start offset on)
197 * @reloc: reloc informations
198 *
199 * Check next packet is relocation packet3, do bo validation and compute
200 * GPU offset using the provided start.
201 **/
202static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
203 struct radeon_cs_reloc **cs_reloc)
204{
205 struct radeon_cs_chunk *relocs_chunk;
206 struct radeon_cs_packet p3reloc;
207 unsigned idx;
208 int r;
209
210 if (p->chunk_relocs_idx == -1) {
211 DRM_ERROR("No relocation chunk !\n");
212 return -EINVAL;
213 }
214 *cs_reloc = NULL;
215 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
216 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
217 if (r) {
218 return r;
219 }
220 p->idx += p3reloc.count + 2;
221 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
222 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
223 p3reloc.idx);
224 return -EINVAL;
225 }
226 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
227 if (idx >= relocs_chunk->length_dw) {
228 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
229 idx, relocs_chunk->length_dw);
230 return -EINVAL;
231 }
232 /* FIXME: we assume reloc size is 4 dwords */
233 *cs_reloc = p->relocs_ptr[(idx / 4)];
234 return 0;
235}
236
237/**
238 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
239 * @parser: parser structure holding parsing context.
240 *
241 * Check next packet is relocation packet3, do bo validation and compute
242 * GPU offset using the provided start.
243 **/
244static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
245{
246 struct radeon_cs_packet p3reloc;
247 int r;
248
249 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
250 if (r) {
251 return 0;
252 }
253 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
254 return 0;
255 }
256 return 1;
257}
258
259/**
260 * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
261 * @parser: parser structure holding parsing context.
262 *
263 * Userspace sends a special sequence for VLINE waits.
264 * PACKET0 - VLINE_START_END + value
265 * PACKET3 - WAIT_REG_MEM poll vline status reg
266 * RELOC (P3) - crtc_id in reloc.
267 *
268 * This function parses this and relocates the VLINE START END
269 * and WAIT_REG_MEM packets to the correct crtc.
270 * It also detects a switched off crtc and nulls out the
271 * wait in that case.
272 */
273static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
274{
275 struct drm_mode_object *obj;
276 struct drm_crtc *crtc;
277 struct radeon_crtc *radeon_crtc;
278 struct radeon_cs_packet p3reloc, wait_reg_mem;
279 int crtc_id;
280 int r;
281 uint32_t header, h_idx, reg, wait_reg_mem_info;
282 volatile uint32_t *ib;
283
284 ib = p->ib->ptr;
285
286 /* parse the WAIT_REG_MEM */
287 r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
288 if (r)
289 return r;
290
291 /* check its a WAIT_REG_MEM */
292 if (wait_reg_mem.type != PACKET_TYPE3 ||
293 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
294 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
295 r = -EINVAL;
296 return r;
297 }
298
299 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
300 /* bit 4 is reg (0) or mem (1) */
301 if (wait_reg_mem_info & 0x10) {
302 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
303 r = -EINVAL;
304 return r;
305 }
306 /* waiting for value to be equal */
307 if ((wait_reg_mem_info & 0x7) != 0x3) {
308 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
309 r = -EINVAL;
310 return r;
311 }
312 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
313 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
314 r = -EINVAL;
315 return r;
316 }
317
318 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
319 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
320 r = -EINVAL;
321 return r;
322 }
323
324 /* jump over the NOP */
325 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
326 if (r)
327 return r;
328
329 h_idx = p->idx - 2;
330 p->idx += wait_reg_mem.count + 2;
331 p->idx += p3reloc.count + 2;
332
333 header = radeon_get_ib_value(p, h_idx);
334 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
335 reg = CP_PACKET0_GET_REG(header);
336 mutex_lock(&p->rdev->ddev->mode_config.mutex);
337 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
338 if (!obj) {
339 DRM_ERROR("cannot find crtc %d\n", crtc_id);
340 r = -EINVAL;
341 goto out;
342 }
343 crtc = obj_to_crtc(obj);
344 radeon_crtc = to_radeon_crtc(crtc);
345 crtc_id = radeon_crtc->crtc_id;
346
347 if (!crtc->enabled) {
348 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
349 ib[h_idx + 2] = PACKET2(0);
350 ib[h_idx + 3] = PACKET2(0);
351 ib[h_idx + 4] = PACKET2(0);
352 ib[h_idx + 5] = PACKET2(0);
353 ib[h_idx + 6] = PACKET2(0);
354 ib[h_idx + 7] = PACKET2(0);
355 ib[h_idx + 8] = PACKET2(0);
356 } else {
357 switch (reg) {
358 case EVERGREEN_VLINE_START_END:
359 header &= ~R600_CP_PACKET0_REG_MASK;
360 header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
361 ib[h_idx] = header;
362 ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
363 break;
364 default:
365 DRM_ERROR("unknown crtc reloc\n");
366 r = -EINVAL;
367 goto out;
368 }
369 }
370out:
371 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
372 return r;
373}
374
375static int evergreen_packet0_check(struct radeon_cs_parser *p,
376 struct radeon_cs_packet *pkt,
377 unsigned idx, unsigned reg)
378{
379 int r;
380
381 switch (reg) {
382 case EVERGREEN_VLINE_START_END:
383 r = evergreen_cs_packet_parse_vline(p);
384 if (r) {
385 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
386 idx, reg);
387 return r;
388 }
389 break;
390 default:
391 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
392 reg, idx);
393 return -EINVAL;
394 }
395 return 0;
396}
397
398static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
399 struct radeon_cs_packet *pkt)
400{
401 unsigned reg, i;
402 unsigned idx;
403 int r;
404
405 idx = pkt->idx + 1;
406 reg = pkt->reg;
407 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
408 r = evergreen_packet0_check(p, pkt, idx, reg);
409 if (r) {
410 return r;
411 }
412 }
413 return 0;
414}
415
416/**
417 * evergreen_cs_check_reg() - check if register is authorized or not
418 * @parser: parser structure holding parsing context
419 * @reg: register we are testing
420 * @idx: index into the cs buffer
421 *
422 * This function will test against evergreen_reg_safe_bm and return 0
423 * if register is safe. If register is not flag as safe this function
424 * will test it against a list of register needind special handling.
425 */
426static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
427{
428 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
429 struct radeon_cs_reloc *reloc;
430 u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
431 u32 m, i, tmp, *ib;
432 int r;
433
434 i = (reg >> 7);
435 if (i > last_reg) {
436 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
437 return -EINVAL;
438 }
439 m = 1 << ((reg >> 2) & 31);
440 if (!(evergreen_reg_safe_bm[i] & m))
441 return 0;
442 ib = p->ib->ptr;
443 switch (reg) {
444 /* force following reg to 0 in an attemp to disable out buffer
445 * which will need us to better understand how it works to perform
446 * security check on it (Jerome)
447 */
448 case SQ_ESGS_RING_SIZE:
449 case SQ_GSVS_RING_SIZE:
450 case SQ_ESTMP_RING_SIZE:
451 case SQ_GSTMP_RING_SIZE:
452 case SQ_HSTMP_RING_SIZE:
453 case SQ_LSTMP_RING_SIZE:
454 case SQ_PSTMP_RING_SIZE:
455 case SQ_VSTMP_RING_SIZE:
456 case SQ_ESGS_RING_ITEMSIZE:
457 case SQ_ESTMP_RING_ITEMSIZE:
458 case SQ_GSTMP_RING_ITEMSIZE:
459 case SQ_GSVS_RING_ITEMSIZE:
460 case SQ_GS_VERT_ITEMSIZE:
461 case SQ_GS_VERT_ITEMSIZE_1:
462 case SQ_GS_VERT_ITEMSIZE_2:
463 case SQ_GS_VERT_ITEMSIZE_3:
464 case SQ_GSVS_RING_OFFSET_1:
465 case SQ_GSVS_RING_OFFSET_2:
466 case SQ_GSVS_RING_OFFSET_3:
467 case SQ_HSTMP_RING_ITEMSIZE:
468 case SQ_LSTMP_RING_ITEMSIZE:
469 case SQ_PSTMP_RING_ITEMSIZE:
470 case SQ_VSTMP_RING_ITEMSIZE:
471 case VGT_TF_RING_SIZE:
472 /* get value to populate the IB don't remove */
473 tmp =radeon_get_ib_value(p, idx);
474 ib[idx] = 0;
475 break;
476 case DB_DEPTH_CONTROL:
477 track->db_depth_control = radeon_get_ib_value(p, idx);
478 break;
479 case DB_Z_INFO:
480 r = evergreen_cs_packet_next_reloc(p, &reloc);
481 if (r) {
482 dev_warn(p->dev, "bad SET_CONTEXT_REG "
483 "0x%04X\n", reg);
484 return -EINVAL;
485 }
486 track->db_z_info = radeon_get_ib_value(p, idx);
487 ib[idx] &= ~Z_ARRAY_MODE(0xf);
488 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
489 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
490 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
491 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
492 } else {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
495 }
496 break;
497 case DB_STENCIL_INFO:
498 track->db_s_info = radeon_get_ib_value(p, idx);
499 break;
500 case DB_DEPTH_VIEW:
501 track->db_depth_view = radeon_get_ib_value(p, idx);
502 break;
503 case DB_DEPTH_SIZE:
504 track->db_depth_size = radeon_get_ib_value(p, idx);
505 track->db_depth_size_idx = idx;
506 break;
507 case DB_Z_READ_BASE:
508 r = evergreen_cs_packet_next_reloc(p, &reloc);
509 if (r) {
510 dev_warn(p->dev, "bad SET_CONTEXT_REG "
511 "0x%04X\n", reg);
512 return -EINVAL;
513 }
514 track->db_z_read_offset = radeon_get_ib_value(p, idx);
515 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
516 track->db_z_read_bo = reloc->robj;
517 break;
518 case DB_Z_WRITE_BASE:
519 r = evergreen_cs_packet_next_reloc(p, &reloc);
520 if (r) {
521 dev_warn(p->dev, "bad SET_CONTEXT_REG "
522 "0x%04X\n", reg);
523 return -EINVAL;
524 }
525 track->db_z_write_offset = radeon_get_ib_value(p, idx);
526 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
527 track->db_z_write_bo = reloc->robj;
528 break;
529 case DB_STENCIL_READ_BASE:
530 r = evergreen_cs_packet_next_reloc(p, &reloc);
531 if (r) {
532 dev_warn(p->dev, "bad SET_CONTEXT_REG "
533 "0x%04X\n", reg);
534 return -EINVAL;
535 }
536 track->db_s_read_offset = radeon_get_ib_value(p, idx);
537 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
538 track->db_s_read_bo = reloc->robj;
539 break;
540 case DB_STENCIL_WRITE_BASE:
541 r = evergreen_cs_packet_next_reloc(p, &reloc);
542 if (r) {
543 dev_warn(p->dev, "bad SET_CONTEXT_REG "
544 "0x%04X\n", reg);
545 return -EINVAL;
546 }
547 track->db_s_write_offset = radeon_get_ib_value(p, idx);
548 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
549 track->db_s_write_bo = reloc->robj;
550 break;
551 case VGT_STRMOUT_CONFIG:
552 track->vgt_strmout_config = radeon_get_ib_value(p, idx);
553 break;
554 case VGT_STRMOUT_BUFFER_CONFIG:
555 track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
556 break;
557 case CB_TARGET_MASK:
558 track->cb_target_mask = radeon_get_ib_value(p, idx);
559 break;
560 case CB_SHADER_MASK:
561 track->cb_shader_mask = radeon_get_ib_value(p, idx);
562 break;
563 case PA_SC_AA_CONFIG:
564 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
565 track->nsamples = 1 << tmp;
566 break;
567 case CB_COLOR0_VIEW:
568 case CB_COLOR1_VIEW:
569 case CB_COLOR2_VIEW:
570 case CB_COLOR3_VIEW:
571 case CB_COLOR4_VIEW:
572 case CB_COLOR5_VIEW:
573 case CB_COLOR6_VIEW:
574 case CB_COLOR7_VIEW:
575 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
576 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
577 break;
578 case CB_COLOR8_VIEW:
579 case CB_COLOR9_VIEW:
580 case CB_COLOR10_VIEW:
581 case CB_COLOR11_VIEW:
582 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
583 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
584 break;
585 case CB_COLOR0_INFO:
586 case CB_COLOR1_INFO:
587 case CB_COLOR2_INFO:
588 case CB_COLOR3_INFO:
589 case CB_COLOR4_INFO:
590 case CB_COLOR5_INFO:
591 case CB_COLOR6_INFO:
592 case CB_COLOR7_INFO:
593 r = evergreen_cs_packet_next_reloc(p, &reloc);
594 if (r) {
595 dev_warn(p->dev, "bad SET_CONTEXT_REG "
596 "0x%04X\n", reg);
597 return -EINVAL;
598 }
599 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
600 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
601 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
602 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
603 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
604 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
605 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
606 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
607 }
608 break;
609 case CB_COLOR8_INFO:
610 case CB_COLOR9_INFO:
611 case CB_COLOR10_INFO:
612 case CB_COLOR11_INFO:
613 r = evergreen_cs_packet_next_reloc(p, &reloc);
614 if (r) {
615 dev_warn(p->dev, "bad SET_CONTEXT_REG "
616 "0x%04X\n", reg);
617 return -EINVAL;
618 }
619 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
620 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
621 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
624 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
625 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
626 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
627 }
628 break;
629 case CB_COLOR0_PITCH:
630 case CB_COLOR1_PITCH:
631 case CB_COLOR2_PITCH:
632 case CB_COLOR3_PITCH:
633 case CB_COLOR4_PITCH:
634 case CB_COLOR5_PITCH:
635 case CB_COLOR6_PITCH:
636 case CB_COLOR7_PITCH:
637 tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
638 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
639 track->cb_color_pitch_idx[tmp] = idx;
640 break;
641 case CB_COLOR8_PITCH:
642 case CB_COLOR9_PITCH:
643 case CB_COLOR10_PITCH:
644 case CB_COLOR11_PITCH:
645 tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
646 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
647 track->cb_color_pitch_idx[tmp] = idx;
648 break;
649 case CB_COLOR0_SLICE:
650 case CB_COLOR1_SLICE:
651 case CB_COLOR2_SLICE:
652 case CB_COLOR3_SLICE:
653 case CB_COLOR4_SLICE:
654 case CB_COLOR5_SLICE:
655 case CB_COLOR6_SLICE:
656 case CB_COLOR7_SLICE:
657 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
658 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
659 track->cb_color_slice_idx[tmp] = idx;
660 break;
661 case CB_COLOR8_SLICE:
662 case CB_COLOR9_SLICE:
663 case CB_COLOR10_SLICE:
664 case CB_COLOR11_SLICE:
665 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
666 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
667 track->cb_color_slice_idx[tmp] = idx;
668 break;
669 case CB_COLOR0_ATTRIB:
670 case CB_COLOR1_ATTRIB:
671 case CB_COLOR2_ATTRIB:
672 case CB_COLOR3_ATTRIB:
673 case CB_COLOR4_ATTRIB:
674 case CB_COLOR5_ATTRIB:
675 case CB_COLOR6_ATTRIB:
676 case CB_COLOR7_ATTRIB:
677 case CB_COLOR8_ATTRIB:
678 case CB_COLOR9_ATTRIB:
679 case CB_COLOR10_ATTRIB:
680 case CB_COLOR11_ATTRIB:
681 break;
682 case CB_COLOR0_DIM:
683 case CB_COLOR1_DIM:
684 case CB_COLOR2_DIM:
685 case CB_COLOR3_DIM:
686 case CB_COLOR4_DIM:
687 case CB_COLOR5_DIM:
688 case CB_COLOR6_DIM:
689 case CB_COLOR7_DIM:
690 tmp = (reg - CB_COLOR0_DIM) / 0x3c;
691 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
692 track->cb_color_dim_idx[tmp] = idx;
693 break;
694 case CB_COLOR8_DIM:
695 case CB_COLOR9_DIM:
696 case CB_COLOR10_DIM:
697 case CB_COLOR11_DIM:
698 tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
699 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
700 track->cb_color_dim_idx[tmp] = idx;
701 break;
702 case CB_COLOR0_FMASK:
703 case CB_COLOR1_FMASK:
704 case CB_COLOR2_FMASK:
705 case CB_COLOR3_FMASK:
706 case CB_COLOR4_FMASK:
707 case CB_COLOR5_FMASK:
708 case CB_COLOR6_FMASK:
709 case CB_COLOR7_FMASK:
710 tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
711 r = evergreen_cs_packet_next_reloc(p, &reloc);
712 if (r) {
713 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
714 return -EINVAL;
715 }
716 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
717 track->cb_color_fmask_bo[tmp] = reloc->robj;
718 break;
719 case CB_COLOR0_CMASK:
720 case CB_COLOR1_CMASK:
721 case CB_COLOR2_CMASK:
722 case CB_COLOR3_CMASK:
723 case CB_COLOR4_CMASK:
724 case CB_COLOR5_CMASK:
725 case CB_COLOR6_CMASK:
726 case CB_COLOR7_CMASK:
727 tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
728 r = evergreen_cs_packet_next_reloc(p, &reloc);
729 if (r) {
730 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
731 return -EINVAL;
732 }
733 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
734 track->cb_color_cmask_bo[tmp] = reloc->robj;
735 break;
736 case CB_COLOR0_FMASK_SLICE:
737 case CB_COLOR1_FMASK_SLICE:
738 case CB_COLOR2_FMASK_SLICE:
739 case CB_COLOR3_FMASK_SLICE:
740 case CB_COLOR4_FMASK_SLICE:
741 case CB_COLOR5_FMASK_SLICE:
742 case CB_COLOR6_FMASK_SLICE:
743 case CB_COLOR7_FMASK_SLICE:
744 tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
745 track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
746 break;
747 case CB_COLOR0_CMASK_SLICE:
748 case CB_COLOR1_CMASK_SLICE:
749 case CB_COLOR2_CMASK_SLICE:
750 case CB_COLOR3_CMASK_SLICE:
751 case CB_COLOR4_CMASK_SLICE:
752 case CB_COLOR5_CMASK_SLICE:
753 case CB_COLOR6_CMASK_SLICE:
754 case CB_COLOR7_CMASK_SLICE:
755 tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
756 track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
757 break;
758 case CB_COLOR0_BASE:
759 case CB_COLOR1_BASE:
760 case CB_COLOR2_BASE:
761 case CB_COLOR3_BASE:
762 case CB_COLOR4_BASE:
763 case CB_COLOR5_BASE:
764 case CB_COLOR6_BASE:
765 case CB_COLOR7_BASE:
766 r = evergreen_cs_packet_next_reloc(p, &reloc);
767 if (r) {
768 dev_warn(p->dev, "bad SET_CONTEXT_REG "
769 "0x%04X\n", reg);
770 return -EINVAL;
771 }
772 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
773 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
774 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
775 track->cb_color_base_last[tmp] = ib[idx];
776 track->cb_color_bo[tmp] = reloc->robj;
777 break;
778 case CB_COLOR8_BASE:
779 case CB_COLOR9_BASE:
780 case CB_COLOR10_BASE:
781 case CB_COLOR11_BASE:
782 r = evergreen_cs_packet_next_reloc(p, &reloc);
783 if (r) {
784 dev_warn(p->dev, "bad SET_CONTEXT_REG "
785 "0x%04X\n", reg);
786 return -EINVAL;
787 }
788 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
789 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
790 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
791 track->cb_color_base_last[tmp] = ib[idx];
792 track->cb_color_bo[tmp] = reloc->robj;
793 break;
794 case CB_IMMED0_BASE:
795 case CB_IMMED1_BASE:
796 case CB_IMMED2_BASE:
797 case CB_IMMED3_BASE:
798 case CB_IMMED4_BASE:
799 case CB_IMMED5_BASE:
800 case CB_IMMED6_BASE:
801 case CB_IMMED7_BASE:
802 case CB_IMMED8_BASE:
803 case CB_IMMED9_BASE:
804 case CB_IMMED10_BASE:
805 case CB_IMMED11_BASE:
806 case DB_HTILE_DATA_BASE:
807 case SQ_PGM_START_FS:
808 case SQ_PGM_START_ES:
809 case SQ_PGM_START_VS:
810 case SQ_PGM_START_GS:
811 case SQ_PGM_START_PS:
812 case SQ_PGM_START_HS:
813 case SQ_PGM_START_LS:
814 case GDS_ADDR_BASE:
815 case SQ_CONST_MEM_BASE:
816 case SQ_ALU_CONST_CACHE_GS_0:
817 case SQ_ALU_CONST_CACHE_GS_1:
818 case SQ_ALU_CONST_CACHE_GS_2:
819 case SQ_ALU_CONST_CACHE_GS_3:
820 case SQ_ALU_CONST_CACHE_GS_4:
821 case SQ_ALU_CONST_CACHE_GS_5:
822 case SQ_ALU_CONST_CACHE_GS_6:
823 case SQ_ALU_CONST_CACHE_GS_7:
824 case SQ_ALU_CONST_CACHE_GS_8:
825 case SQ_ALU_CONST_CACHE_GS_9:
826 case SQ_ALU_CONST_CACHE_GS_10:
827 case SQ_ALU_CONST_CACHE_GS_11:
828 case SQ_ALU_CONST_CACHE_GS_12:
829 case SQ_ALU_CONST_CACHE_GS_13:
830 case SQ_ALU_CONST_CACHE_GS_14:
831 case SQ_ALU_CONST_CACHE_GS_15:
832 case SQ_ALU_CONST_CACHE_PS_0:
833 case SQ_ALU_CONST_CACHE_PS_1:
834 case SQ_ALU_CONST_CACHE_PS_2:
835 case SQ_ALU_CONST_CACHE_PS_3:
836 case SQ_ALU_CONST_CACHE_PS_4:
837 case SQ_ALU_CONST_CACHE_PS_5:
838 case SQ_ALU_CONST_CACHE_PS_6:
839 case SQ_ALU_CONST_CACHE_PS_7:
840 case SQ_ALU_CONST_CACHE_PS_8:
841 case SQ_ALU_CONST_CACHE_PS_9:
842 case SQ_ALU_CONST_CACHE_PS_10:
843 case SQ_ALU_CONST_CACHE_PS_11:
844 case SQ_ALU_CONST_CACHE_PS_12:
845 case SQ_ALU_CONST_CACHE_PS_13:
846 case SQ_ALU_CONST_CACHE_PS_14:
847 case SQ_ALU_CONST_CACHE_PS_15:
848 case SQ_ALU_CONST_CACHE_VS_0:
849 case SQ_ALU_CONST_CACHE_VS_1:
850 case SQ_ALU_CONST_CACHE_VS_2:
851 case SQ_ALU_CONST_CACHE_VS_3:
852 case SQ_ALU_CONST_CACHE_VS_4:
853 case SQ_ALU_CONST_CACHE_VS_5:
854 case SQ_ALU_CONST_CACHE_VS_6:
855 case SQ_ALU_CONST_CACHE_VS_7:
856 case SQ_ALU_CONST_CACHE_VS_8:
857 case SQ_ALU_CONST_CACHE_VS_9:
858 case SQ_ALU_CONST_CACHE_VS_10:
859 case SQ_ALU_CONST_CACHE_VS_11:
860 case SQ_ALU_CONST_CACHE_VS_12:
861 case SQ_ALU_CONST_CACHE_VS_13:
862 case SQ_ALU_CONST_CACHE_VS_14:
863 case SQ_ALU_CONST_CACHE_VS_15:
864 case SQ_ALU_CONST_CACHE_HS_0:
865 case SQ_ALU_CONST_CACHE_HS_1:
866 case SQ_ALU_CONST_CACHE_HS_2:
867 case SQ_ALU_CONST_CACHE_HS_3:
868 case SQ_ALU_CONST_CACHE_HS_4:
869 case SQ_ALU_CONST_CACHE_HS_5:
870 case SQ_ALU_CONST_CACHE_HS_6:
871 case SQ_ALU_CONST_CACHE_HS_7:
872 case SQ_ALU_CONST_CACHE_HS_8:
873 case SQ_ALU_CONST_CACHE_HS_9:
874 case SQ_ALU_CONST_CACHE_HS_10:
875 case SQ_ALU_CONST_CACHE_HS_11:
876 case SQ_ALU_CONST_CACHE_HS_12:
877 case SQ_ALU_CONST_CACHE_HS_13:
878 case SQ_ALU_CONST_CACHE_HS_14:
879 case SQ_ALU_CONST_CACHE_HS_15:
880 case SQ_ALU_CONST_CACHE_LS_0:
881 case SQ_ALU_CONST_CACHE_LS_1:
882 case SQ_ALU_CONST_CACHE_LS_2:
883 case SQ_ALU_CONST_CACHE_LS_3:
884 case SQ_ALU_CONST_CACHE_LS_4:
885 case SQ_ALU_CONST_CACHE_LS_5:
886 case SQ_ALU_CONST_CACHE_LS_6:
887 case SQ_ALU_CONST_CACHE_LS_7:
888 case SQ_ALU_CONST_CACHE_LS_8:
889 case SQ_ALU_CONST_CACHE_LS_9:
890 case SQ_ALU_CONST_CACHE_LS_10:
891 case SQ_ALU_CONST_CACHE_LS_11:
892 case SQ_ALU_CONST_CACHE_LS_12:
893 case SQ_ALU_CONST_CACHE_LS_13:
894 case SQ_ALU_CONST_CACHE_LS_14:
895 case SQ_ALU_CONST_CACHE_LS_15:
896 r = evergreen_cs_packet_next_reloc(p, &reloc);
897 if (r) {
898 dev_warn(p->dev, "bad SET_CONTEXT_REG "
899 "0x%04X\n", reg);
900 return -EINVAL;
901 }
902 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
903 break;
904 default:
905 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
906 return -EINVAL;
907 }
908 return 0;
909}
910
/**
 * evergreen_check_texture_resource() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid field and that
 * the texture and mipmap bo object are big enough to cover this resource.
 *
 * NOTE(review): currently an unimplemented stub — it performs no
 * validation and unconditionally returns 0 (accept).  Callers in
 * evergreen_packet3_check() rely on 0 meaning "resource OK".
 */
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
						   struct radeon_bo *texture,
						   struct radeon_bo *mipmap)
{
	/* XXX fill in */
	return 0;
}
928
/**
 * evergreen_packet3_check() - validate a single type-3 (PM4) packet
 * @p: parser structure holding parsing context
 * @pkt: packet to validate (header already decoded by the caller)
 *
 * Checks the dword count of each supported PACKET3 opcode, resolves the
 * relocation(s) the packet references, and patches the corresponding IB
 * dwords with the bo's GPU address.  For 64-bit addresses the low 32 bits
 * go into one dword and only the low 8 bits of upper_32_bits() into the
 * next, i.e. a 40-bit address — presumably the hw address width; confirm
 * against the Evergreen PM4 spec.  Register-write packets are forwarded
 * register-by-register to evergreen_cs_check_reg().
 *
 * Return: 0 if the packet is allowed, -EINVAL otherwise.
 */
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	/* idx points at the first dword after the packet header */
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		/* single-dword packets: count must be 0 */
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		/* patch base address: low 32 bits, then upper 8 bits */
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		/* address dwords start one further in than DRAW_INDEX */
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		/* no reloc: indices are generated, not fetched from memory */
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		/* indices are embedded in the packet, so count is variable */
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			/* waiting on memory: patch the poll address */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			/* flushing a specific surface: base address is in
			 * 256-byte units, hence the >> 8 */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		/* the 2-dword variant carries a destination address */
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		/* idx_value is the register offset from the range start,
		 * in dwords; each written dword is vetted individually */
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		/* each resource descriptor is exactly 8 dwords */
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			/* word 7 of the descriptor encodes the resource type */
			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				/* base is in 256-byte units; propagate tiling
				 * mode from the bo into word 1 */
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p,  idx+1+(i*8),
						texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		/* samplers are 3 dwords each */
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
1299
1300int evergreen_cs_parse(struct radeon_cs_parser *p)
1301{
1302 struct radeon_cs_packet pkt;
1303 struct evergreen_cs_track *track;
1304 int r;
1305
1306 if (p->track == NULL) {
1307 /* initialize tracker, we are in kms */
1308 track = kzalloc(sizeof(*track), GFP_KERNEL);
1309 if (track == NULL)
1310 return -ENOMEM;
1311 evergreen_cs_track_init(track);
1312 track->npipes = p->rdev->config.evergreen.tiling_npipes;
1313 track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
1314 track->group_size = p->rdev->config.evergreen.tiling_group_size;
1315 p->track = track;
1316 }
1317 do {
1318 r = evergreen_cs_packet_parse(p, &pkt, p->idx);
1319 if (r) {
1320 kfree(p->track);
1321 p->track = NULL;
1322 return r;
1323 }
1324 p->idx += pkt.count + 2;
1325 switch (pkt.type) {
1326 case PACKET_TYPE0:
1327 r = evergreen_cs_parse_packet0(p, &pkt);
1328 break;
1329 case PACKET_TYPE2:
1330 break;
1331 case PACKET_TYPE3:
1332 r = evergreen_packet3_check(p, &pkt);
1333 break;
1334 default:
1335 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1336 kfree(p->track);
1337 p->track = NULL;
1338 return -EINVAL;
1339 }
1340 if (r) {
1341 kfree(p->track);
1342 p->track = NULL;
1343 return r;
1344 }
1345 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1346#if 0
1347 for (r = 0; r < p->ib->length_dw; r++) {
1348 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
1349 mdelay(1);
1350 }
1351#endif
1352 kfree(p->track);
1353 p->track = NULL;
1354 return 0;
1355}
1356
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index af86af836f13..e028c1cd9d9b 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -151,6 +151,9 @@
151#define EVERGREEN_DATA_FORMAT 0x6b00 151#define EVERGREEN_DATA_FORMAT 0x6b00
152# define EVERGREEN_INTERLEAVE_EN (1 << 0) 152# define EVERGREEN_INTERLEAVE_EN (1 << 0)
153#define EVERGREEN_DESKTOP_HEIGHT 0x6b04 153#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
154#define EVERGREEN_VLINE_START_END 0x6b08
155#define EVERGREEN_VLINE_STATUS 0x6bb8
156# define EVERGREEN_VLINE_STAT (1 << 12)
154 157
155#define EVERGREEN_VIEWPORT_START 0x6d70 158#define EVERGREEN_VIEWPORT_START 0x6d70
156#define EVERGREEN_VIEWPORT_SIZE 0x6d74 159#define EVERGREEN_VIEWPORT_SIZE 0x6d74
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 93e9e17ad54a..79683f6b4452 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -218,6 +218,8 @@
218#define CLIP_VTX_REORDER_ENA (1 << 0) 218#define CLIP_VTX_REORDER_ENA (1 << 0)
219#define NUM_CLIP_SEQ(x) ((x) << 1) 219#define NUM_CLIP_SEQ(x) ((x) << 1)
220#define PA_SC_AA_CONFIG 0x28C04 220#define PA_SC_AA_CONFIG 0x28C04
221#define MSAA_NUM_SAMPLES_SHIFT 0
222#define MSAA_NUM_SAMPLES_MASK 0x3
221#define PA_SC_CLIPRECT_RULE 0x2820C 223#define PA_SC_CLIPRECT_RULE 0x2820C
222#define PA_SC_EDGERULE 0x28230 224#define PA_SC_EDGERULE 0x28230
223#define PA_SC_FIFO_SIZE 0x8BCC 225#define PA_SC_FIFO_SIZE 0x8BCC
@@ -553,4 +555,466 @@
553# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 555# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
554# define DC_HPDx_EN (1 << 28) 556# define DC_HPDx_EN (1 << 28)
555 557
558/*
559 * PM4
560 */
561#define PACKET_TYPE0 0
562#define PACKET_TYPE1 1
563#define PACKET_TYPE2 2
564#define PACKET_TYPE3 3
565
566#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
567#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
568#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
569#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
570#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
571 (((reg) >> 2) & 0xFFFF) | \
572 ((n) & 0x3FFF) << 16)
573#define CP_PACKET2 0x80000000
574#define PACKET2_PAD_SHIFT 0
575#define PACKET2_PAD_MASK (0x3fffffff << 0)
576
577#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
578
579#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
580 (((op) & 0xFF) << 8) | \
581 ((n) & 0x3FFF) << 16)
582
583/* Packet 3 types */
584#define PACKET3_NOP 0x10
585#define PACKET3_SET_BASE 0x11
586#define PACKET3_CLEAR_STATE 0x12
587#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
588#define PACKET3_DISPATCH_DIRECT 0x15
589#define PACKET3_DISPATCH_INDIRECT 0x16
590#define PACKET3_INDIRECT_BUFFER_END 0x17
591#define PACKET3_SET_PREDICATION 0x20
592#define PACKET3_REG_RMW 0x21
593#define PACKET3_COND_EXEC 0x22
594#define PACKET3_PRED_EXEC 0x23
595#define PACKET3_DRAW_INDIRECT 0x24
596#define PACKET3_DRAW_INDEX_INDIRECT 0x25
597#define PACKET3_INDEX_BASE 0x26
598#define PACKET3_DRAW_INDEX_2 0x27
599#define PACKET3_CONTEXT_CONTROL 0x28
600#define PACKET3_DRAW_INDEX_OFFSET 0x29
601#define PACKET3_INDEX_TYPE 0x2A
602#define PACKET3_DRAW_INDEX 0x2B
603#define PACKET3_DRAW_INDEX_AUTO 0x2D
604#define PACKET3_DRAW_INDEX_IMMD 0x2E
605#define PACKET3_NUM_INSTANCES 0x2F
606#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
607#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
608#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
609#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
610#define PACKET3_MEM_SEMAPHORE 0x39
611#define PACKET3_MPEG_INDEX 0x3A
612#define PACKET3_WAIT_REG_MEM 0x3C
613#define PACKET3_MEM_WRITE 0x3D
614#define PACKET3_INDIRECT_BUFFER 0x32
615#define PACKET3_SURFACE_SYNC 0x43
616# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
617# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
618# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
619# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
620# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
621# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
622# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
623# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
624# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
625# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
626# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
627# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
 628# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
629# define PACKET3_FULL_CACHE_ENA (1 << 20)
630# define PACKET3_TC_ACTION_ENA (1 << 23)
631# define PACKET3_VC_ACTION_ENA (1 << 24)
632# define PACKET3_CB_ACTION_ENA (1 << 25)
633# define PACKET3_DB_ACTION_ENA (1 << 26)
634# define PACKET3_SH_ACTION_ENA (1 << 27)
635# define PACKET3_SMX_ACTION_ENA (1 << 28)
636#define PACKET3_ME_INITIALIZE 0x44
637#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
638#define PACKET3_COND_WRITE 0x45
639#define PACKET3_EVENT_WRITE 0x46
640#define PACKET3_EVENT_WRITE_EOP 0x47
641#define PACKET3_EVENT_WRITE_EOS 0x48
642#define PACKET3_PREAMBLE_CNTL 0x4A
643#define PACKET3_RB_OFFSET 0x4B
644#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
645#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
646#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
647#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
648#define PACKET3_ONE_REG_WRITE 0x57
649#define PACKET3_SET_CONFIG_REG 0x68
650#define PACKET3_SET_CONFIG_REG_START 0x00008000
651#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
652#define PACKET3_SET_CONTEXT_REG 0x69
653#define PACKET3_SET_CONTEXT_REG_START 0x00028000
654#define PACKET3_SET_CONTEXT_REG_END 0x00029000
655#define PACKET3_SET_ALU_CONST 0x6A
656/* alu const buffers only; no reg file */
657#define PACKET3_SET_BOOL_CONST 0x6B
658#define PACKET3_SET_BOOL_CONST_START 0x0003a500
659#define PACKET3_SET_BOOL_CONST_END 0x0003a518
660#define PACKET3_SET_LOOP_CONST 0x6C
661#define PACKET3_SET_LOOP_CONST_START 0x0003a200
662#define PACKET3_SET_LOOP_CONST_END 0x0003a500
663#define PACKET3_SET_RESOURCE 0x6D
664#define PACKET3_SET_RESOURCE_START 0x00030000
665#define PACKET3_SET_RESOURCE_END 0x00038000
666#define PACKET3_SET_SAMPLER 0x6E
667#define PACKET3_SET_SAMPLER_START 0x0003c000
668#define PACKET3_SET_SAMPLER_END 0x0003c600
669#define PACKET3_SET_CTL_CONST 0x6F
670#define PACKET3_SET_CTL_CONST_START 0x0003cff0
671#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
672#define PACKET3_SET_RESOURCE_OFFSET 0x70
673#define PACKET3_SET_ALU_CONST_VS 0x71
674#define PACKET3_SET_ALU_CONST_DI 0x72
675#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
676#define PACKET3_SET_RESOURCE_INDIRECT 0x74
677#define PACKET3_SET_APPEND_CNT 0x75
678
679#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
680#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
681#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
682#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
683#define SQ_TEX_VTX_INVALID_BUFFER 0x1
684#define SQ_TEX_VTX_VALID_TEXTURE 0x2
685#define SQ_TEX_VTX_VALID_BUFFER 0x3
686
687#define SQ_CONST_MEM_BASE 0x8df8
688
689#define SQ_ESGS_RING_SIZE 0x8c44
690#define SQ_GSVS_RING_SIZE 0x8c4c
691#define SQ_ESTMP_RING_SIZE 0x8c54
692#define SQ_GSTMP_RING_SIZE 0x8c5c
693#define SQ_VSTMP_RING_SIZE 0x8c64
694#define SQ_PSTMP_RING_SIZE 0x8c6c
695#define SQ_LSTMP_RING_SIZE 0x8e14
696#define SQ_HSTMP_RING_SIZE 0x8e1c
697#define VGT_TF_RING_SIZE 0x8988
698
699#define SQ_ESGS_RING_ITEMSIZE 0x28900
700#define SQ_GSVS_RING_ITEMSIZE 0x28904
701#define SQ_ESTMP_RING_ITEMSIZE 0x28908
702#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
703#define SQ_VSTMP_RING_ITEMSIZE 0x28910
704#define SQ_PSTMP_RING_ITEMSIZE 0x28914
705#define SQ_LSTMP_RING_ITEMSIZE 0x28830
706#define SQ_HSTMP_RING_ITEMSIZE 0x28834
707
708#define SQ_GS_VERT_ITEMSIZE 0x2891c
709#define SQ_GS_VERT_ITEMSIZE_1 0x28920
710#define SQ_GS_VERT_ITEMSIZE_2 0x28924
711#define SQ_GS_VERT_ITEMSIZE_3 0x28928
712#define SQ_GSVS_RING_OFFSET_1 0x2892c
713#define SQ_GSVS_RING_OFFSET_2 0x28930
714#define SQ_GSVS_RING_OFFSET_3 0x28934
715
716#define SQ_ALU_CONST_CACHE_PS_0 0x28940
717#define SQ_ALU_CONST_CACHE_PS_1 0x28944
718#define SQ_ALU_CONST_CACHE_PS_2 0x28948
719#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
720#define SQ_ALU_CONST_CACHE_PS_4 0x28950
721#define SQ_ALU_CONST_CACHE_PS_5 0x28954
722#define SQ_ALU_CONST_CACHE_PS_6 0x28958
723#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
724#define SQ_ALU_CONST_CACHE_PS_8 0x28960
725#define SQ_ALU_CONST_CACHE_PS_9 0x28964
726#define SQ_ALU_CONST_CACHE_PS_10 0x28968
727#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
728#define SQ_ALU_CONST_CACHE_PS_12 0x28970
729#define SQ_ALU_CONST_CACHE_PS_13 0x28974
730#define SQ_ALU_CONST_CACHE_PS_14 0x28978
731#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
732#define SQ_ALU_CONST_CACHE_VS_0 0x28980
733#define SQ_ALU_CONST_CACHE_VS_1 0x28984
734#define SQ_ALU_CONST_CACHE_VS_2 0x28988
735#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
736#define SQ_ALU_CONST_CACHE_VS_4 0x28990
737#define SQ_ALU_CONST_CACHE_VS_5 0x28994
738#define SQ_ALU_CONST_CACHE_VS_6 0x28998
739#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
740#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
741#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
742#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
743#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
744#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
745#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
746#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
747#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
748#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
749#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
750#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
751#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
752#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
753#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
754#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
755#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
756#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
757#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
758#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
759#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
760#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
761#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
762#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
763#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
764#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
765#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
766#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
767#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
768#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
769#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
770#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
771#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
772#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
773#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
774#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
775#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
776#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
777#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
778#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
779#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
780#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
781#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
782#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
783#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
784#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
785#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
786#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
787#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
788#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
789#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
790#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
791#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
792#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
793#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
794#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
795#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
796
797#define DB_DEPTH_CONTROL 0x28800
798#define DB_DEPTH_VIEW 0x28008
799#define DB_HTILE_DATA_BASE 0x28014
800#define DB_Z_INFO 0x28040
801# define Z_ARRAY_MODE(x) ((x) << 4)
802#define DB_STENCIL_INFO 0x28044
803#define DB_Z_READ_BASE 0x28048
804#define DB_STENCIL_READ_BASE 0x2804c
805#define DB_Z_WRITE_BASE 0x28050
806#define DB_STENCIL_WRITE_BASE 0x28054
807#define DB_DEPTH_SIZE 0x28058
808
809#define SQ_PGM_START_PS 0x28840
810#define SQ_PGM_START_VS 0x2885c
811#define SQ_PGM_START_GS 0x28874
812#define SQ_PGM_START_ES 0x2888c
813#define SQ_PGM_START_FS 0x288a4
814#define SQ_PGM_START_HS 0x288b8
815#define SQ_PGM_START_LS 0x288d0
816
817#define VGT_STRMOUT_CONFIG 0x28b94
818#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
819
820#define CB_TARGET_MASK 0x28238
821#define CB_SHADER_MASK 0x2823c
822
823#define GDS_ADDR_BASE 0x28720
824
825#define CB_IMMED0_BASE 0x28b9c
826#define CB_IMMED1_BASE 0x28ba0
827#define CB_IMMED2_BASE 0x28ba4
828#define CB_IMMED3_BASE 0x28ba8
829#define CB_IMMED4_BASE 0x28bac
830#define CB_IMMED5_BASE 0x28bb0
831#define CB_IMMED6_BASE 0x28bb4
832#define CB_IMMED7_BASE 0x28bb8
833#define CB_IMMED8_BASE 0x28bbc
834#define CB_IMMED9_BASE 0x28bc0
835#define CB_IMMED10_BASE 0x28bc4
836#define CB_IMMED11_BASE 0x28bc8
837
838/* all 12 CB blocks have these regs */
839#define CB_COLOR0_BASE 0x28c60
840#define CB_COLOR0_PITCH 0x28c64
841#define CB_COLOR0_SLICE 0x28c68
842#define CB_COLOR0_VIEW 0x28c6c
843#define CB_COLOR0_INFO 0x28c70
844# define CB_ARRAY_MODE(x) ((x) << 8)
845# define ARRAY_LINEAR_GENERAL 0
846# define ARRAY_LINEAR_ALIGNED 1
847# define ARRAY_1D_TILED_THIN1 2
848# define ARRAY_2D_TILED_THIN1 4
849#define CB_COLOR0_ATTRIB 0x28c74
850#define CB_COLOR0_DIM 0x28c78
851/* only CB0-7 blocks have these regs */
852#define CB_COLOR0_CMASK 0x28c7c
853#define CB_COLOR0_CMASK_SLICE 0x28c80
854#define CB_COLOR0_FMASK 0x28c84
855#define CB_COLOR0_FMASK_SLICE 0x28c88
856#define CB_COLOR0_CLEAR_WORD0 0x28c8c
857#define CB_COLOR0_CLEAR_WORD1 0x28c90
858#define CB_COLOR0_CLEAR_WORD2 0x28c94
859#define CB_COLOR0_CLEAR_WORD3 0x28c98
860
861#define CB_COLOR1_BASE 0x28c9c
862#define CB_COLOR2_BASE 0x28cd8
863#define CB_COLOR3_BASE 0x28d14
864#define CB_COLOR4_BASE 0x28d50
865#define CB_COLOR5_BASE 0x28d8c
866#define CB_COLOR6_BASE 0x28dc8
867#define CB_COLOR7_BASE 0x28e04
868#define CB_COLOR8_BASE 0x28e40
869#define CB_COLOR9_BASE 0x28e5c
870#define CB_COLOR10_BASE 0x28e78
871#define CB_COLOR11_BASE 0x28e94
872
873#define CB_COLOR1_PITCH 0x28ca0
874#define CB_COLOR2_PITCH 0x28cdc
875#define CB_COLOR3_PITCH 0x28d18
876#define CB_COLOR4_PITCH 0x28d54
877#define CB_COLOR5_PITCH 0x28d90
878#define CB_COLOR6_PITCH 0x28dcc
879#define CB_COLOR7_PITCH 0x28e08
880#define CB_COLOR8_PITCH 0x28e44
881#define CB_COLOR9_PITCH 0x28e60
882#define CB_COLOR10_PITCH 0x28e7c
883#define CB_COLOR11_PITCH 0x28e98
884
885#define CB_COLOR1_SLICE 0x28ca4
886#define CB_COLOR2_SLICE 0x28ce0
887#define CB_COLOR3_SLICE 0x28d1c
888#define CB_COLOR4_SLICE 0x28d58
889#define CB_COLOR5_SLICE 0x28d94
890#define CB_COLOR6_SLICE 0x28dd0
891#define CB_COLOR7_SLICE 0x28e0c
892#define CB_COLOR8_SLICE 0x28e48
893#define CB_COLOR9_SLICE 0x28e64
894#define CB_COLOR10_SLICE 0x28e80
895#define CB_COLOR11_SLICE 0x28e9c
896
897#define CB_COLOR1_VIEW 0x28ca8
898#define CB_COLOR2_VIEW 0x28ce4
899#define CB_COLOR3_VIEW 0x28d20
900#define CB_COLOR4_VIEW 0x28d5c
901#define CB_COLOR5_VIEW 0x28d98
902#define CB_COLOR6_VIEW 0x28dd4
903#define CB_COLOR7_VIEW 0x28e10
904#define CB_COLOR8_VIEW 0x28e4c
905#define CB_COLOR9_VIEW 0x28e68
906#define CB_COLOR10_VIEW 0x28e84
907#define CB_COLOR11_VIEW 0x28ea0
908
909#define CB_COLOR1_INFO 0x28cac
910#define CB_COLOR2_INFO 0x28ce8
911#define CB_COLOR3_INFO 0x28d24
912#define CB_COLOR4_INFO 0x28d60
913#define CB_COLOR5_INFO 0x28d9c
914#define CB_COLOR6_INFO 0x28dd8
915#define CB_COLOR7_INFO 0x28e14
916#define CB_COLOR8_INFO 0x28e50
917#define CB_COLOR9_INFO 0x28e6c
918#define CB_COLOR10_INFO 0x28e88
919#define CB_COLOR11_INFO 0x28ea4
920
921#define CB_COLOR1_ATTRIB 0x28cb0
922#define CB_COLOR2_ATTRIB 0x28cec
923#define CB_COLOR3_ATTRIB 0x28d28
924#define CB_COLOR4_ATTRIB 0x28d64
925#define CB_COLOR5_ATTRIB 0x28da0
926#define CB_COLOR6_ATTRIB 0x28ddc
927#define CB_COLOR7_ATTRIB 0x28e18
928#define CB_COLOR8_ATTRIB 0x28e54
929#define CB_COLOR9_ATTRIB 0x28e70
930#define CB_COLOR10_ATTRIB 0x28e8c
931#define CB_COLOR11_ATTRIB 0x28ea8
932
933#define CB_COLOR1_DIM 0x28cb4
934#define CB_COLOR2_DIM 0x28cf0
935#define CB_COLOR3_DIM 0x28d2c
936#define CB_COLOR4_DIM 0x28d68
937#define CB_COLOR5_DIM 0x28da4
938#define CB_COLOR6_DIM 0x28de0
939#define CB_COLOR7_DIM 0x28e1c
940#define CB_COLOR8_DIM 0x28e58
941#define CB_COLOR9_DIM 0x28e74
942#define CB_COLOR10_DIM 0x28e90
943#define CB_COLOR11_DIM 0x28eac
944
945#define CB_COLOR1_CMASK 0x28cb8
946#define CB_COLOR2_CMASK 0x28cf4
947#define CB_COLOR3_CMASK 0x28d30
948#define CB_COLOR4_CMASK 0x28d6c
949#define CB_COLOR5_CMASK 0x28da8
950#define CB_COLOR6_CMASK 0x28de4
951#define CB_COLOR7_CMASK 0x28e20
952
953#define CB_COLOR1_CMASK_SLICE 0x28cbc
954#define CB_COLOR2_CMASK_SLICE 0x28cf8
955#define CB_COLOR3_CMASK_SLICE 0x28d34
956#define CB_COLOR4_CMASK_SLICE 0x28d70
957#define CB_COLOR5_CMASK_SLICE 0x28dac
958#define CB_COLOR6_CMASK_SLICE 0x28de8
959#define CB_COLOR7_CMASK_SLICE 0x28e24
960
961#define CB_COLOR1_FMASK 0x28cc0
962#define CB_COLOR2_FMASK 0x28cfc
963#define CB_COLOR3_FMASK 0x28d38
964#define CB_COLOR4_FMASK 0x28d74
965#define CB_COLOR5_FMASK 0x28db0
966#define CB_COLOR6_FMASK 0x28dec
967#define CB_COLOR7_FMASK 0x28e28
968
969#define CB_COLOR1_FMASK_SLICE 0x28cc4
970#define CB_COLOR2_FMASK_SLICE 0x28d00
971#define CB_COLOR3_FMASK_SLICE 0x28d3c
972#define CB_COLOR4_FMASK_SLICE 0x28d78
973#define CB_COLOR5_FMASK_SLICE 0x28db4
974#define CB_COLOR6_FMASK_SLICE 0x28df0
975#define CB_COLOR7_FMASK_SLICE 0x28e2c
976
977#define CB_COLOR1_CLEAR_WORD0 0x28cc8
978#define CB_COLOR2_CLEAR_WORD0 0x28d04
979#define CB_COLOR3_CLEAR_WORD0 0x28d40
980#define CB_COLOR4_CLEAR_WORD0 0x28d7c
981#define CB_COLOR5_CLEAR_WORD0 0x28db8
982#define CB_COLOR6_CLEAR_WORD0 0x28df4
983#define CB_COLOR7_CLEAR_WORD0 0x28e30
984
985#define CB_COLOR1_CLEAR_WORD1 0x28ccc
986#define CB_COLOR2_CLEAR_WORD1 0x28d08
987#define CB_COLOR3_CLEAR_WORD1 0x28d44
988#define CB_COLOR4_CLEAR_WORD1 0x28d80
989#define CB_COLOR5_CLEAR_WORD1 0x28dbc
990#define CB_COLOR6_CLEAR_WORD1 0x28df8
991#define CB_COLOR7_CLEAR_WORD1 0x28e34
992
993#define CB_COLOR1_CLEAR_WORD2 0x28cd0
994#define CB_COLOR2_CLEAR_WORD2 0x28d0c
995#define CB_COLOR3_CLEAR_WORD2 0x28d48
996#define CB_COLOR4_CLEAR_WORD2 0x28d84
997#define CB_COLOR5_CLEAR_WORD2 0x28dc0
998#define CB_COLOR6_CLEAR_WORD2 0x28dfc
999#define CB_COLOR7_CLEAR_WORD2 0x28e38
1000
1001#define CB_COLOR1_CLEAR_WORD3 0x28cd4
1002#define CB_COLOR2_CLEAR_WORD3 0x28d10
1003#define CB_COLOR3_CLEAR_WORD3 0x28d4c
1004#define CB_COLOR4_CLEAR_WORD3 0x28d88
1005#define CB_COLOR5_CLEAR_WORD3 0x28dc4
1006#define CB_COLOR6_CLEAR_WORD3 0x28e00
1007#define CB_COLOR7_CLEAR_WORD3 0x28e3c
1008
1009#define SQ_TEX_RESOURCE_WORD0_0 0x30000
1010#define SQ_TEX_RESOURCE_WORD1_0 0x30004
1011# define TEX_ARRAY_MODE(x) ((x) << 28)
1012#define SQ_TEX_RESOURCE_WORD2_0 0x30008
1013#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
1014#define SQ_TEX_RESOURCE_WORD4_0 0x30010
1015#define SQ_TEX_RESOURCE_WORD5_0 0x30014
1016#define SQ_TEX_RESOURCE_WORD6_0 0x30018
1017#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
1018
1019
556#endif 1020#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 44e96a2ae25a..e14f59748e65 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -475,6 +475,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
475 475
476void r600_pm_misc(struct radeon_device *rdev) 476void r600_pm_misc(struct radeon_device *rdev)
477{ 477{
478 int requested_index = rdev->pm.requested_power_state_index;
479 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
480 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
481
482 if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
483 radeon_atom_set_voltage(rdev, voltage->voltage);
478 484
479} 485}
480 486
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 669feb689bfc..5f96fe871b3f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -176,6 +176,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
176void radeon_pm_resume(struct radeon_device *rdev); 176void radeon_pm_resume(struct radeon_device *rdev);
177void radeon_combios_get_power_modes(struct radeon_device *rdev); 177void radeon_combios_get_power_modes(struct radeon_device *rdev);
178void radeon_atombios_get_power_modes(struct radeon_device *rdev); 178void radeon_atombios_get_power_modes(struct radeon_device *rdev);
179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
179 180
180/* 181/*
181 * Fences. 182 * Fences.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e57df08d4aeb..87f7e2cc52d4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = {
724 .irq_set = &evergreen_irq_set, 724 .irq_set = &evergreen_irq_set,
725 .irq_process = &evergreen_irq_process, 725 .irq_process = &evergreen_irq_process,
726 .get_vblank_counter = &evergreen_get_vblank_counter, 726 .get_vblank_counter = &evergreen_get_vblank_counter,
727 .fence_ring_emit = NULL, 727 .fence_ring_emit = &r600_fence_ring_emit,
728 .cs_parse = NULL, 728 .cs_parse = &evergreen_cs_parse,
729 .copy_blit = NULL, 729 .copy_blit = NULL,
730 .copy_dma = NULL, 730 .copy_dma = NULL,
731 .copy = NULL, 731 .copy = NULL,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5c40a3dfaca2..c0bbaa64157a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
314u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); 314u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
315int evergreen_irq_set(struct radeon_device *rdev); 315int evergreen_irq_set(struct radeon_device *rdev);
316int evergreen_irq_process(struct radeon_device *rdev); 316int evergreen_irq_process(struct radeon_device *rdev);
317extern int evergreen_cs_parse(struct radeon_cs_parser *p);
317extern void evergreen_pm_misc(struct radeon_device *rdev); 318extern void evergreen_pm_misc(struct radeon_device *rdev);
318extern void evergreen_pm_prepare(struct radeon_device *rdev); 319extern void evergreen_pm_prepare(struct radeon_device *rdev);
319extern void evergreen_pm_finish(struct radeon_device *rdev); 320extern void evergreen_pm_finish(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 24ea683f7cf5..4305cd55d0ac 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1538,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1538 rdev->pm.power_state[state_index].pcie_lanes = 1538 rdev->pm.power_state[state_index].pcie_lanes =
1539 power_info->info.asPowerPlayInfo[i].ucNumPciELanes; 1539 power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
1540 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); 1540 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
1541 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { 1541 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
1542 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
1542 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 1543 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1543 VOLTAGE_GPIO; 1544 VOLTAGE_GPIO;
1544 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 1545 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1605,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1605 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; 1606 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
1606 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); 1607 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
1607 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); 1608 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
1608 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { 1609 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
1610 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
1609 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 1611 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1610 VOLTAGE_GPIO; 1612 VOLTAGE_GPIO;
1611 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 1613 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1679,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1679 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; 1681 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
1680 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); 1682 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
1681 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); 1683 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
1682 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { 1684 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
1685 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
1683 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 1686 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1684 VOLTAGE_GPIO; 1687 VOLTAGE_GPIO;
1685 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 1688 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1755,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1755 rdev->pm.power_state[state_index].misc2 = 0; 1758 rdev->pm.power_state[state_index].misc2 = 0;
1756 } 1759 }
1757 } else { 1760 } else {
1761 int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
1762 uint8_t fw_frev, fw_crev;
1763 uint16_t fw_data_offset, vddc = 0;
1764 union firmware_info *firmware_info;
1765 ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
1766
1767 if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
1768 &fw_frev, &fw_crev, &fw_data_offset)) {
1769 firmware_info =
1770 (union firmware_info *)(mode_info->atom_context->bios +
1771 fw_data_offset);
1772 vddc = firmware_info->info_14.usBootUpVDDCVoltage;
1773 }
1774
1758 /* add the i2c bus for thermal/fan chip */ 1775 /* add the i2c bus for thermal/fan chip */
1759 /* no support for internal controller yet */ 1776 /* no support for internal controller yet */
1760 ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
1761 if (controller->ucType > 0) { 1777 if (controller->ucType > 0) {
1762 if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || 1778 if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
1763 (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || 1779 (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
@@ -1904,6 +1920,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1904 rdev->pm.default_power_state_index = state_index; 1920 rdev->pm.default_power_state_index = state_index;
1905 rdev->pm.power_state[state_index].default_clock_mode = 1921 rdev->pm.power_state[state_index].default_clock_mode =
1906 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 1922 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
1923 /* patch the table values with the default slck/mclk from firmware info */
1924 for (j = 0; j < mode_index; j++) {
1925 rdev->pm.power_state[state_index].clock_info[j].mclk =
1926 rdev->clock.default_mclk;
1927 rdev->pm.power_state[state_index].clock_info[j].sclk =
1928 rdev->clock.default_sclk;
1929 if (vddc)
1930 rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
1931 vddc;
1932 }
1907 } 1933 }
1908 state_index++; 1934 state_index++;
1909 } 1935 }
@@ -1998,6 +2024,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
1998 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2024 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1999} 2025}
2000 2026
2027union set_voltage {
2028 struct _SET_VOLTAGE_PS_ALLOCATION alloc;
2029 struct _SET_VOLTAGE_PARAMETERS v1;
2030 struct _SET_VOLTAGE_PARAMETERS_V2 v2;
2031};
2032
2033void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
2034{
2035 union set_voltage args;
2036 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
2037 u8 frev, crev, volt_index = level;
2038
2039 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2040 return;
2041
2042 switch (crev) {
2043 case 1:
2044 args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
2045 args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
2046 args.v1.ucVoltageIndex = volt_index;
2047 break;
2048 case 2:
2049 args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
2050 args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
2051 args.v2.usVoltageLevel = cpu_to_le16(level);
2052 break;
2053 default:
2054 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
2055 return;
2056 }
2057
2058 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2059}
2060
2061
2062
2001void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) 2063void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2002{ 2064{
2003 struct radeon_device *rdev = dev->dev_private; 2065 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b5e10d3e9c9..102c744eaf5a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2454,7 +2454,12 @@ default_mode:
2454 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2454 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2455 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2455 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2456 rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; 2456 rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
2457 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2457 if ((state_index > 0) &&
2458 (rdev->pm.power_state[0].clock_info[0].voltage.type = VOLTAGE_GPIO))
2459 rdev->pm.power_state[state_index].clock_info[0].voltage =
2460 rdev->pm.power_state[0].clock_info[0].voltage;
2461 else
2462 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2458 rdev->pm.power_state[state_index].pcie_lanes = 16; 2463 rdev->pm.power_state[state_index].pcie_lanes = 16;
2459 rdev->pm.power_state[state_index].flags = 0; 2464 rdev->pm.power_state[state_index].flags = 0;
2460 rdev->pm.default_power_state_index = state_index; 2465 rdev->pm.default_power_state_index = state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index fdc3fdf78acb..f10faed21567 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
546 /* don't suspend or resume card normally */ 546 /* don't suspend or resume card normally */
547 rdev->powered_down = false; 547 rdev->powered_down = false;
548 radeon_resume_kms(dev); 548 radeon_resume_kms(dev);
549 drm_kms_helper_poll_enable(dev);
549 } else { 550 } else {
550 printk(KERN_INFO "radeon: switched off\n"); 551 printk(KERN_INFO "radeon: switched off\n");
552 drm_kms_helper_poll_disable(dev);
551 radeon_suspend_kms(dev, pmm); 553 radeon_suspend_kms(dev, pmm);
552 /* don't suspend or resume card normally */ 554 /* don't suspend or resume card normally */
553 rdev->powered_down = true; 555 rdev->powered_down = true;
@@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
711{ 713{
712 struct radeon_device *rdev; 714 struct radeon_device *rdev;
713 struct drm_crtc *crtc; 715 struct drm_crtc *crtc;
716 struct drm_connector *connector;
714 int r; 717 int r;
715 718
716 if (dev == NULL || dev->dev_private == NULL) { 719 if (dev == NULL || dev->dev_private == NULL) {
@@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
723 726
724 if (rdev->powered_down) 727 if (rdev->powered_down)
725 return 0; 728 return 0;
729
730 /* turn off display hw */
731 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
732 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
733 }
734
726 /* unpin the front buffers */ 735 /* unpin the front buffers */
727 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 736 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
728 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 737 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a8d162c6f829..02281269a881 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -151,6 +151,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev)
151static void radeon_set_power_state(struct radeon_device *rdev) 151static void radeon_set_power_state(struct radeon_device *rdev)
152{ 152{
153 u32 sclk, mclk; 153 u32 sclk, mclk;
154 bool misc_after = false;
154 155
155 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 156 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
156 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 157 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
@@ -167,55 +168,47 @@ static void radeon_set_power_state(struct radeon_device *rdev)
167 if (mclk > rdev->clock.default_mclk) 168 if (mclk > rdev->clock.default_mclk)
168 mclk = rdev->clock.default_mclk; 169 mclk = rdev->clock.default_mclk;
169 170
170 /* voltage, pcie lanes, etc.*/ 171 /* upvolt before raising clocks, downvolt after lowering clocks */
171 radeon_pm_misc(rdev); 172 if (sclk < rdev->pm.current_sclk)
173 misc_after = true;
172 174
173 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 175 radeon_sync_with_vblank(rdev);
174 radeon_sync_with_vblank(rdev);
175 176
177 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
176 if (!radeon_pm_in_vbl(rdev)) 178 if (!radeon_pm_in_vbl(rdev))
177 return; 179 return;
180 }
178 181
179 radeon_pm_prepare(rdev); 182 radeon_pm_prepare(rdev);
180 /* set engine clock */
181 if (sclk != rdev->pm.current_sclk) {
182 radeon_pm_debug_check_in_vbl(rdev, false);
183 radeon_set_engine_clock(rdev, sclk);
184 radeon_pm_debug_check_in_vbl(rdev, true);
185 rdev->pm.current_sclk = sclk;
186 DRM_DEBUG("Setting: e: %d\n", sclk);
187 }
188 183
189 /* set memory clock */ 184 if (!misc_after)
190 if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { 185 /* voltage, pcie lanes, etc.*/
191 radeon_pm_debug_check_in_vbl(rdev, false); 186 radeon_pm_misc(rdev);
192 radeon_set_memory_clock(rdev, mclk); 187
193 radeon_pm_debug_check_in_vbl(rdev, true); 188 /* set engine clock */
194 rdev->pm.current_mclk = mclk; 189 if (sclk != rdev->pm.current_sclk) {
195 DRM_DEBUG("Setting: m: %d\n", mclk); 190 radeon_pm_debug_check_in_vbl(rdev, false);
196 } 191 radeon_set_engine_clock(rdev, sclk);
197 radeon_pm_finish(rdev); 192 radeon_pm_debug_check_in_vbl(rdev, true);
198 } else { 193 rdev->pm.current_sclk = sclk;
199 /* set engine clock */ 194 DRM_DEBUG("Setting: e: %d\n", sclk);
200 if (sclk != rdev->pm.current_sclk) { 195 }
201 radeon_sync_with_vblank(rdev); 196
202 radeon_pm_prepare(rdev); 197 /* set memory clock */
203 radeon_set_engine_clock(rdev, sclk); 198 if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
204 radeon_pm_finish(rdev); 199 radeon_pm_debug_check_in_vbl(rdev, false);
205 rdev->pm.current_sclk = sclk; 200 radeon_set_memory_clock(rdev, mclk);
206 DRM_DEBUG("Setting: e: %d\n", sclk); 201 radeon_pm_debug_check_in_vbl(rdev, true);
207 } 202 rdev->pm.current_mclk = mclk;
208 /* set memory clock */ 203 DRM_DEBUG("Setting: m: %d\n", mclk);
209 if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
210 radeon_sync_with_vblank(rdev);
211 radeon_pm_prepare(rdev);
212 radeon_set_memory_clock(rdev, mclk);
213 radeon_pm_finish(rdev);
214 rdev->pm.current_mclk = mclk;
215 DRM_DEBUG("Setting: m: %d\n", mclk);
216 }
217 } 204 }
218 205
206 if (misc_after)
207 /* voltage, pcie lanes, etc.*/
208 radeon_pm_misc(rdev);
209
210 radeon_pm_finish(rdev);
211
219 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; 212 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
220 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; 213 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
221 } else 214 } else
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 000000000000..b5c757f68d3c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,611 @@
1evergreen 0x9400
20x00008040 WAIT_UNTIL
30x00008044 WAIT_UNTIL_POLL_CNTL
40x00008048 WAIT_UNTIL_POLL_MASK
50x0000804c WAIT_UNTIL_POLL_REFDATA
60x000088B0 VGT_VTX_VECT_EJECT_REG
70x000088C4 VGT_CACHE_INVALIDATION
80x000088D4 VGT_GS_VERTEX_REUSE
90x00008958 VGT_PRIMITIVE_TYPE
100x0000895C VGT_INDEX_TYPE
110x00008970 VGT_NUM_INDICES
120x00008974 VGT_NUM_INSTANCES
130x00008990 VGT_COMPUTE_DIM_X
140x00008994 VGT_COMPUTE_DIM_Y
150x00008998 VGT_COMPUTE_DIM_Z
160x0000899C VGT_COMPUTE_START_X
170x000089A0 VGT_COMPUTE_START_Y
180x000089A4 VGT_COMPUTE_START_Z
190x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
200x00008A14 PA_CL_ENHANCE
210x00008A60 PA_SC_LINE_STIPPLE_VALUE
220x00008B10 PA_SC_LINE_STIPPLE_STATE
230x00008BF0 PA_SC_ENHANCE
240x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
250x00008C00 SQ_CONFIG
260x00008C04 SQ_GPR_RESOURCE_MGMT_1
270x00008C08 SQ_GPR_RESOURCE_MGMT_2
280x00008C0C SQ_GPR_RESOURCE_MGMT_3
290x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
300x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
310x00008C18 SQ_THREAD_RESOURCE_MGMT
320x00008C1C SQ_THREAD_RESOURCE_MGMT_2
330x00008C20 SQ_STACK_RESOURCE_MGMT_1
340x00008C24 SQ_STACK_RESOURCE_MGMT_2
350x00008C28 SQ_STACK_RESOURCE_MGMT_3
360x00008DF8 SQ_CONST_MEM_BASE
370x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
380x00009100 SPI_CONFIG_CNTL
390x0000913C SPI_CONFIG_CNTL_1
400x00009700 VC_CNTL
410x00009714 VC_ENHANCE
420x00009830 DB_DEBUG
430x00009834 DB_DEBUG2
440x00009838 DB_DEBUG3
450x0000983C DB_DEBUG4
460x00009854 DB_WATERMARKS
470x0000A400 TD_PS_BORDER_COLOR_INDEX
480x0000A404 TD_PS_BORDER_COLOR_RED
490x0000A408 TD_PS_BORDER_COLOR_GREEN
500x0000A40C TD_PS_BORDER_COLOR_BLUE
510x0000A410 TD_PS_BORDER_COLOR_ALPHA
520x0000A414 TD_VS_BORDER_COLOR_INDEX
530x0000A418 TD_VS_BORDER_COLOR_RED
540x0000A41C TD_VS_BORDER_COLOR_GREEN
550x0000A420 TD_VS_BORDER_COLOR_BLUE
560x0000A424 TD_VS_BORDER_COLOR_ALPHA
570x0000A428 TD_GS_BORDER_COLOR_INDEX
580x0000A42C TD_GS_BORDER_COLOR_RED
590x0000A430 TD_GS_BORDER_COLOR_GREEN
600x0000A434 TD_GS_BORDER_COLOR_BLUE
610x0000A438 TD_GS_BORDER_COLOR_ALPHA
620x0000A43C TD_HS_BORDER_COLOR_INDEX
630x0000A440 TD_HS_BORDER_COLOR_RED
640x0000A444 TD_HS_BORDER_COLOR_GREEN
650x0000A448 TD_HS_BORDER_COLOR_BLUE
660x0000A44C TD_HS_BORDER_COLOR_ALPHA
670x0000A450 TD_LS_BORDER_COLOR_INDEX
680x0000A454 TD_LS_BORDER_COLOR_RED
690x0000A458 TD_LS_BORDER_COLOR_GREEN
700x0000A45C TD_LS_BORDER_COLOR_BLUE
710x0000A460 TD_LS_BORDER_COLOR_ALPHA
720x0000A464 TD_CS_BORDER_COLOR_INDEX
730x0000A468 TD_CS_BORDER_COLOR_RED
740x0000A46C TD_CS_BORDER_COLOR_GREEN
750x0000A470 TD_CS_BORDER_COLOR_BLUE
760x0000A474 TD_CS_BORDER_COLOR_ALPHA
770x00028000 DB_RENDER_CONTROL
780x00028004 DB_COUNT_CONTROL
790x0002800C DB_RENDER_OVERRIDE
800x00028010 DB_RENDER_OVERRIDE2
810x00028028 DB_STENCIL_CLEAR
820x0002802C DB_DEPTH_CLEAR
830x00028034 PA_SC_SCREEN_SCISSOR_BR
840x00028030 PA_SC_SCREEN_SCISSOR_TL
850x0002805C DB_DEPTH_SLICE
860x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
870x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
880x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
890x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
900x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
910x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
920x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
930x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
940x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
950x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
960x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
970x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
980x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
990x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
1000x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
1010x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
1020x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
1030x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
1040x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
1050x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
1060x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
1070x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
1080x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
1090x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
1100x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
1110x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
1120x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
1130x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
1140x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
1150x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
1160x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
1170x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
1180x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
1190x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
1200x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
1210x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
1220x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
1230x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
1240x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
1250x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
1260x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
1270x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
1280x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
1290x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
1300x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
1310x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
1320x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
1330x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
1340x00028200 PA_SC_WINDOW_OFFSET
1350x00028204 PA_SC_WINDOW_SCISSOR_TL
1360x00028208 PA_SC_WINDOW_SCISSOR_BR
1370x0002820C PA_SC_CLIPRECT_RULE
1380x00028210 PA_SC_CLIPRECT_0_TL
1390x00028214 PA_SC_CLIPRECT_0_BR
1400x00028218 PA_SC_CLIPRECT_1_TL
1410x0002821C PA_SC_CLIPRECT_1_BR
1420x00028220 PA_SC_CLIPRECT_2_TL
1430x00028224 PA_SC_CLIPRECT_2_BR
1440x00028228 PA_SC_CLIPRECT_3_TL
1450x0002822C PA_SC_CLIPRECT_3_BR
1460x00028230 PA_SC_EDGERULE
1470x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
1480x00028240 PA_SC_GENERIC_SCISSOR_TL
1490x00028244 PA_SC_GENERIC_SCISSOR_BR
1500x00028250 PA_SC_VPORT_SCISSOR_0_TL
1510x00028254 PA_SC_VPORT_SCISSOR_0_BR
1520x00028258 PA_SC_VPORT_SCISSOR_1_TL
1530x0002825C PA_SC_VPORT_SCISSOR_1_BR
1540x00028260 PA_SC_VPORT_SCISSOR_2_TL
1550x00028264 PA_SC_VPORT_SCISSOR_2_BR
1560x00028268 PA_SC_VPORT_SCISSOR_3_TL
1570x0002826C PA_SC_VPORT_SCISSOR_3_BR
1580x00028270 PA_SC_VPORT_SCISSOR_4_TL
1590x00028274 PA_SC_VPORT_SCISSOR_4_BR
1600x00028278 PA_SC_VPORT_SCISSOR_5_TL
1610x0002827C PA_SC_VPORT_SCISSOR_5_BR
1620x00028280 PA_SC_VPORT_SCISSOR_6_TL
1630x00028284 PA_SC_VPORT_SCISSOR_6_BR
1640x00028288 PA_SC_VPORT_SCISSOR_7_TL
1650x0002828C PA_SC_VPORT_SCISSOR_7_BR
1660x00028290 PA_SC_VPORT_SCISSOR_8_TL
1670x00028294 PA_SC_VPORT_SCISSOR_8_BR
1680x00028298 PA_SC_VPORT_SCISSOR_9_TL
1690x0002829C PA_SC_VPORT_SCISSOR_9_BR
1700x000282A0 PA_SC_VPORT_SCISSOR_10_TL
1710x000282A4 PA_SC_VPORT_SCISSOR_10_BR
1720x000282A8 PA_SC_VPORT_SCISSOR_11_TL
1730x000282AC PA_SC_VPORT_SCISSOR_11_BR
1740x000282B0 PA_SC_VPORT_SCISSOR_12_TL
1750x000282B4 PA_SC_VPORT_SCISSOR_12_BR
1760x000282B8 PA_SC_VPORT_SCISSOR_13_TL
1770x000282BC PA_SC_VPORT_SCISSOR_13_BR
1780x000282C0 PA_SC_VPORT_SCISSOR_14_TL
1790x000282C4 PA_SC_VPORT_SCISSOR_14_BR
1800x000282C8 PA_SC_VPORT_SCISSOR_15_TL
1810x000282CC PA_SC_VPORT_SCISSOR_15_BR
1820x000282D0 PA_SC_VPORT_ZMIN_0
1830x000282D4 PA_SC_VPORT_ZMAX_0
1840x000282D8 PA_SC_VPORT_ZMIN_1
1850x000282DC PA_SC_VPORT_ZMAX_1
1860x000282E0 PA_SC_VPORT_ZMIN_2
1870x000282E4 PA_SC_VPORT_ZMAX_2
1880x000282E8 PA_SC_VPORT_ZMIN_3
1890x000282EC PA_SC_VPORT_ZMAX_3
1900x000282F0 PA_SC_VPORT_ZMIN_4
1910x000282F4 PA_SC_VPORT_ZMAX_4
1920x000282F8 PA_SC_VPORT_ZMIN_5
1930x000282FC PA_SC_VPORT_ZMAX_5
1940x00028300 PA_SC_VPORT_ZMIN_6
1950x00028304 PA_SC_VPORT_ZMAX_6
1960x00028308 PA_SC_VPORT_ZMIN_7
1970x0002830C PA_SC_VPORT_ZMAX_7
1980x00028310 PA_SC_VPORT_ZMIN_8
1990x00028314 PA_SC_VPORT_ZMAX_8
2000x00028318 PA_SC_VPORT_ZMIN_9
2010x0002831C PA_SC_VPORT_ZMAX_9
2020x00028320 PA_SC_VPORT_ZMIN_10
2030x00028324 PA_SC_VPORT_ZMAX_10
2040x00028328 PA_SC_VPORT_ZMIN_11
2050x0002832C PA_SC_VPORT_ZMAX_11
2060x00028330 PA_SC_VPORT_ZMIN_12
2070x00028334 PA_SC_VPORT_ZMAX_12
2080x00028338 PA_SC_VPORT_ZMIN_13
2090x0002833C PA_SC_VPORT_ZMAX_13
2100x00028340 PA_SC_VPORT_ZMIN_14
2110x00028344 PA_SC_VPORT_ZMAX_14
2120x00028348 PA_SC_VPORT_ZMIN_15
2130x0002834C PA_SC_VPORT_ZMAX_15
2140x00028350 SX_MISC
2150x00028380 SQ_VTX_SEMANTIC_0
2160x00028384 SQ_VTX_SEMANTIC_1
2170x00028388 SQ_VTX_SEMANTIC_2
2180x0002838C SQ_VTX_SEMANTIC_3
2190x00028390 SQ_VTX_SEMANTIC_4
2200x00028394 SQ_VTX_SEMANTIC_5
2210x00028398 SQ_VTX_SEMANTIC_6
2220x0002839C SQ_VTX_SEMANTIC_7
2230x000283A0 SQ_VTX_SEMANTIC_8
2240x000283A4 SQ_VTX_SEMANTIC_9
2250x000283A8 SQ_VTX_SEMANTIC_10
2260x000283AC SQ_VTX_SEMANTIC_11
2270x000283B0 SQ_VTX_SEMANTIC_12
2280x000283B4 SQ_VTX_SEMANTIC_13
2290x000283B8 SQ_VTX_SEMANTIC_14
2300x000283BC SQ_VTX_SEMANTIC_15
2310x000283C0 SQ_VTX_SEMANTIC_16
2320x000283C4 SQ_VTX_SEMANTIC_17
2330x000283C8 SQ_VTX_SEMANTIC_18
2340x000283CC SQ_VTX_SEMANTIC_19
2350x000283D0 SQ_VTX_SEMANTIC_20
2360x000283D4 SQ_VTX_SEMANTIC_21
2370x000283D8 SQ_VTX_SEMANTIC_22
2380x000283DC SQ_VTX_SEMANTIC_23
2390x000283E0 SQ_VTX_SEMANTIC_24
2400x000283E4 SQ_VTX_SEMANTIC_25
2410x000283E8 SQ_VTX_SEMANTIC_26
2420x000283EC SQ_VTX_SEMANTIC_27
2430x000283F0 SQ_VTX_SEMANTIC_28
2440x000283F4 SQ_VTX_SEMANTIC_29
2450x000283F8 SQ_VTX_SEMANTIC_30
2460x000283FC SQ_VTX_SEMANTIC_31
2470x00028400 VGT_MAX_VTX_INDX
2480x00028404 VGT_MIN_VTX_INDX
2490x00028408 VGT_INDX_OFFSET
2500x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
2510x00028410 SX_ALPHA_TEST_CONTROL
2520x00028414 CB_BLEND_RED
2530x00028418 CB_BLEND_GREEN
2540x0002841C CB_BLEND_BLUE
2550x00028420 CB_BLEND_ALPHA
2560x00028430 DB_STENCILREFMASK
2570x00028434 DB_STENCILREFMASK_BF
2580x00028438 SX_ALPHA_REF
2590x0002843C PA_CL_VPORT_XSCALE_0
2600x00028440 PA_CL_VPORT_XOFFSET_0
2610x00028444 PA_CL_VPORT_YSCALE_0
2620x00028448 PA_CL_VPORT_YOFFSET_0
2630x0002844C PA_CL_VPORT_ZSCALE_0
2640x00028450 PA_CL_VPORT_ZOFFSET_0
2650x00028454 PA_CL_VPORT_XSCALE_1
2660x00028458 PA_CL_VPORT_XOFFSET_1
2670x0002845C PA_CL_VPORT_YSCALE_1
2680x00028460 PA_CL_VPORT_YOFFSET_1
2690x00028464 PA_CL_VPORT_ZSCALE_1
2700x00028468 PA_CL_VPORT_ZOFFSET_1
2710x0002846C PA_CL_VPORT_XSCALE_2
2720x00028470 PA_CL_VPORT_XOFFSET_2
2730x00028474 PA_CL_VPORT_YSCALE_2
2740x00028478 PA_CL_VPORT_YOFFSET_2
2750x0002847C PA_CL_VPORT_ZSCALE_2
2760x00028480 PA_CL_VPORT_ZOFFSET_2
2770x00028484 PA_CL_VPORT_XSCALE_3
2780x00028488 PA_CL_VPORT_XOFFSET_3
2790x0002848C PA_CL_VPORT_YSCALE_3
2800x00028490 PA_CL_VPORT_YOFFSET_3
2810x00028494 PA_CL_VPORT_ZSCALE_3
2820x00028498 PA_CL_VPORT_ZOFFSET_3
2830x0002849C PA_CL_VPORT_XSCALE_4
2840x000284A0 PA_CL_VPORT_XOFFSET_4
2850x000284A4 PA_CL_VPORT_YSCALE_4
2860x000284A8 PA_CL_VPORT_YOFFSET_4
2870x000284AC PA_CL_VPORT_ZSCALE_4
2880x000284B0 PA_CL_VPORT_ZOFFSET_4
2890x000284B4 PA_CL_VPORT_XSCALE_5
2900x000284B8 PA_CL_VPORT_XOFFSET_5
2910x000284BC PA_CL_VPORT_YSCALE_5
2920x000284C0 PA_CL_VPORT_YOFFSET_5
2930x000284C4 PA_CL_VPORT_ZSCALE_5
2940x000284C8 PA_CL_VPORT_ZOFFSET_5
2950x000284CC PA_CL_VPORT_XSCALE_6
2960x000284D0 PA_CL_VPORT_XOFFSET_6
2970x000284D4 PA_CL_VPORT_YSCALE_6
2980x000284D8 PA_CL_VPORT_YOFFSET_6
2990x000284DC PA_CL_VPORT_ZSCALE_6
3000x000284E0 PA_CL_VPORT_ZOFFSET_6
3010x000284E4 PA_CL_VPORT_XSCALE_7
3020x000284E8 PA_CL_VPORT_XOFFSET_7
3030x000284EC PA_CL_VPORT_YSCALE_7
3040x000284F0 PA_CL_VPORT_YOFFSET_7
3050x000284F4 PA_CL_VPORT_ZSCALE_7
3060x000284F8 PA_CL_VPORT_ZOFFSET_7
3070x000284FC PA_CL_VPORT_XSCALE_8
3080x00028500 PA_CL_VPORT_XOFFSET_8
3090x00028504 PA_CL_VPORT_YSCALE_8
3100x00028508 PA_CL_VPORT_YOFFSET_8
3110x0002850C PA_CL_VPORT_ZSCALE_8
3120x00028510 PA_CL_VPORT_ZOFFSET_8
3130x00028514 PA_CL_VPORT_XSCALE_9
3140x00028518 PA_CL_VPORT_XOFFSET_9
3150x0002851C PA_CL_VPORT_YSCALE_9
3160x00028520 PA_CL_VPORT_YOFFSET_9
3170x00028524 PA_CL_VPORT_ZSCALE_9
3180x00028528 PA_CL_VPORT_ZOFFSET_9
3190x0002852C PA_CL_VPORT_XSCALE_10
3200x00028530 PA_CL_VPORT_XOFFSET_10
3210x00028534 PA_CL_VPORT_YSCALE_10
3220x00028538 PA_CL_VPORT_YOFFSET_10
3230x0002853C PA_CL_VPORT_ZSCALE_10
3240x00028540 PA_CL_VPORT_ZOFFSET_10
3250x00028544 PA_CL_VPORT_XSCALE_11
3260x00028548 PA_CL_VPORT_XOFFSET_11
3270x0002854C PA_CL_VPORT_YSCALE_11
3280x00028550 PA_CL_VPORT_YOFFSET_11
3290x00028554 PA_CL_VPORT_ZSCALE_11
3300x00028558 PA_CL_VPORT_ZOFFSET_11
3310x0002855C PA_CL_VPORT_XSCALE_12
3320x00028560 PA_CL_VPORT_XOFFSET_12
3330x00028564 PA_CL_VPORT_YSCALE_12
3340x00028568 PA_CL_VPORT_YOFFSET_12
3350x0002856C PA_CL_VPORT_ZSCALE_12
3360x00028570 PA_CL_VPORT_ZOFFSET_12
3370x00028574 PA_CL_VPORT_XSCALE_13
3380x00028578 PA_CL_VPORT_XOFFSET_13
3390x0002857C PA_CL_VPORT_YSCALE_13
3400x00028580 PA_CL_VPORT_YOFFSET_13
3410x00028584 PA_CL_VPORT_ZSCALE_13
3420x00028588 PA_CL_VPORT_ZOFFSET_13
3430x0002858C PA_CL_VPORT_XSCALE_14
3440x00028590 PA_CL_VPORT_XOFFSET_14
3450x00028594 PA_CL_VPORT_YSCALE_14
3460x00028598 PA_CL_VPORT_YOFFSET_14
3470x0002859C PA_CL_VPORT_ZSCALE_14
3480x000285A0 PA_CL_VPORT_ZOFFSET_14
3490x000285A4 PA_CL_VPORT_XSCALE_15
3500x000285A8 PA_CL_VPORT_XOFFSET_15
3510x000285AC PA_CL_VPORT_YSCALE_15
3520x000285B0 PA_CL_VPORT_YOFFSET_15
3530x000285B4 PA_CL_VPORT_ZSCALE_15
3540x000285B8 PA_CL_VPORT_ZOFFSET_15
3550x000285BC PA_CL_UCP_0_X
3560x000285C0 PA_CL_UCP_0_Y
3570x000285C4 PA_CL_UCP_0_Z
3580x000285C8 PA_CL_UCP_0_W
3590x000285CC PA_CL_UCP_1_X
3600x000285D0 PA_CL_UCP_1_Y
3610x000285D4 PA_CL_UCP_1_Z
3620x000285D8 PA_CL_UCP_1_W
3630x000285DC PA_CL_UCP_2_X
3640x000285E0 PA_CL_UCP_2_Y
3650x000285E4 PA_CL_UCP_2_Z
3660x000285E8 PA_CL_UCP_2_W
3670x000285EC PA_CL_UCP_3_X
3680x000285F0 PA_CL_UCP_3_Y
3690x000285F4 PA_CL_UCP_3_Z
3700x000285F8 PA_CL_UCP_3_W
3710x000285FC PA_CL_UCP_4_X
3720x00028600 PA_CL_UCP_4_Y
3730x00028604 PA_CL_UCP_4_Z
3740x00028608 PA_CL_UCP_4_W
3750x0002860C PA_CL_UCP_5_X
3760x00028610 PA_CL_UCP_5_Y
3770x00028614 PA_CL_UCP_5_Z
3780x00028618 PA_CL_UCP_5_W
3790x0002861C SPI_VS_OUT_ID_0
3800x00028620 SPI_VS_OUT_ID_1
3810x00028624 SPI_VS_OUT_ID_2
3820x00028628 SPI_VS_OUT_ID_3
3830x0002862C SPI_VS_OUT_ID_4
3840x00028630 SPI_VS_OUT_ID_5
3850x00028634 SPI_VS_OUT_ID_6
3860x00028638 SPI_VS_OUT_ID_7
3870x0002863C SPI_VS_OUT_ID_8
3880x00028640 SPI_VS_OUT_ID_9
3890x00028644 SPI_PS_INPUT_CNTL_0
3900x00028648 SPI_PS_INPUT_CNTL_1
3910x0002864C SPI_PS_INPUT_CNTL_2
3920x00028650 SPI_PS_INPUT_CNTL_3
3930x00028654 SPI_PS_INPUT_CNTL_4
3940x00028658 SPI_PS_INPUT_CNTL_5
3950x0002865C SPI_PS_INPUT_CNTL_6
3960x00028660 SPI_PS_INPUT_CNTL_7
3970x00028664 SPI_PS_INPUT_CNTL_8
3980x00028668 SPI_PS_INPUT_CNTL_9
3990x0002866C SPI_PS_INPUT_CNTL_10
4000x00028670 SPI_PS_INPUT_CNTL_11
4010x00028674 SPI_PS_INPUT_CNTL_12
4020x00028678 SPI_PS_INPUT_CNTL_13
4030x0002867C SPI_PS_INPUT_CNTL_14
4040x00028680 SPI_PS_INPUT_CNTL_15
4050x00028684 SPI_PS_INPUT_CNTL_16
4060x00028688 SPI_PS_INPUT_CNTL_17
4070x0002868C SPI_PS_INPUT_CNTL_18
4080x00028690 SPI_PS_INPUT_CNTL_19
4090x00028694 SPI_PS_INPUT_CNTL_20
4100x00028698 SPI_PS_INPUT_CNTL_21
4110x0002869C SPI_PS_INPUT_CNTL_22
4120x000286A0 SPI_PS_INPUT_CNTL_23
4130x000286A4 SPI_PS_INPUT_CNTL_24
4140x000286A8 SPI_PS_INPUT_CNTL_25
4150x000286AC SPI_PS_INPUT_CNTL_26
4160x000286B0 SPI_PS_INPUT_CNTL_27
4170x000286B4 SPI_PS_INPUT_CNTL_28
4180x000286B8 SPI_PS_INPUT_CNTL_29
4190x000286BC SPI_PS_INPUT_CNTL_30
4200x000286C0 SPI_PS_INPUT_CNTL_31
4210x000286C4 SPI_VS_OUT_CONFIG
4220x000286C8 SPI_THREAD_GROUPING
4230x000286CC SPI_PS_IN_CONTROL_0
4240x000286D0 SPI_PS_IN_CONTROL_1
4250x000286D4 SPI_INTERP_CONTROL_0
4260x000286D8 SPI_INPUT_Z
4270x000286DC SPI_FOG_CNTL
4280x000286E0 SPI_BARYC_CNTL
4290x000286E4 SPI_PS_IN_CONTROL_2
4300x000286E8 SPI_COMPUTE_INPUT_CNTL
4310x000286EC SPI_COMPUTE_NUM_THREAD_X
4320x000286F0 SPI_COMPUTE_NUM_THREAD_Y
4330x000286F4 SPI_COMPUTE_NUM_THREAD_Z
4340x000286F8 GDS_ADDR_SIZE
4350x00028780 CB_BLEND0_CONTROL
4360x00028784 CB_BLEND1_CONTROL
4370x00028788 CB_BLEND2_CONTROL
4380x0002878C CB_BLEND3_CONTROL
4390x00028790 CB_BLEND4_CONTROL
4400x00028794 CB_BLEND5_CONTROL
4410x00028798 CB_BLEND6_CONTROL
4420x0002879C CB_BLEND7_CONTROL
4430x000287CC CS_COPY_STATE
4440x000287D0 GFX_COPY_STATE
4450x000287D4 PA_CL_POINT_X_RAD
4460x000287D8 PA_CL_POINT_Y_RAD
4470x000287DC PA_CL_POINT_SIZE
4480x000287E0 PA_CL_POINT_CULL_RAD
4490x00028808 CB_COLOR_CONTROL
4500x0002880C DB_SHADER_CONTROL
4510x00028810 PA_CL_CLIP_CNTL
4520x00028814 PA_SU_SC_MODE_CNTL
4530x00028818 PA_CL_VTE_CNTL
4540x0002881C PA_CL_VS_OUT_CNTL
4550x00028820 PA_CL_NANINF_CNTL
4560x00028824 PA_SU_LINE_STIPPLE_CNTL
4570x00028828 PA_SU_LINE_STIPPLE_SCALE
4580x0002882C PA_SU_PRIM_FILTER_CNTL
4590x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
4600x00028844 SQ_PGM_RESOURCES_PS
4610x00028848 SQ_PGM_RESOURCES_2_PS
4620x0002884C SQ_PGM_EXPORTS_PS
4630x0002885C SQ_PGM_RESOURCES_VS
4640x00028860 SQ_PGM_RESOURCES_2_VS
4650x00028878 SQ_PGM_RESOURCES_GS
4660x0002887C SQ_PGM_RESOURCES_2_GS
4670x00028890 SQ_PGM_RESOURCES_ES
4680x00028894 SQ_PGM_RESOURCES_2_ES
4690x000288A8 SQ_PGM_RESOURCES_FS
4700x000288BC SQ_PGM_RESOURCES_HS
4710x000288C0 SQ_PGM_RESOURCES_2_HS
4720x000288D0 SQ_PGM_RESOURCES_LS
4730x000288D4 SQ_PGM_RESOURCES_2_LS
4740x000288E8 SQ_LDS_ALLOC
4750x000288EC SQ_LDS_ALLOC_PS
4760x000288F0 SQ_VTX_SEMANTIC_CLEAR
4770x00028A00 PA_SU_POINT_SIZE
4780x00028A04 PA_SU_POINT_MINMAX
4790x00028A08 PA_SU_LINE_CNTL
4800x00028A0C PA_SC_LINE_STIPPLE
4810x00028A10 VGT_OUTPUT_PATH_CNTL
4820x00028A14 VGT_HOS_CNTL
4830x00028A18 VGT_HOS_MAX_TESS_LEVEL
4840x00028A1C VGT_HOS_MIN_TESS_LEVEL
4850x00028A20 VGT_HOS_REUSE_DEPTH
4860x00028A24 VGT_GROUP_PRIM_TYPE
4870x00028A28 VGT_GROUP_FIRST_DECR
4880x00028A2C VGT_GROUP_DECR
4890x00028A30 VGT_GROUP_VECT_0_CNTL
4900x00028A34 VGT_GROUP_VECT_1_CNTL
4910x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
4920x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
4930x00028A40 VGT_GS_MODE
4940x00028A48 PA_SC_MODE_CNTL_0
4950x00028A4C PA_SC_MODE_CNTL_1
4960x00028A50 VGT_ENHANCE
4970x00028A54 VGT_GS_PER_ES
4980x00028A58 VGT_ES_PER_GS
4990x00028A5C VGT_GS_PER_VS
5000x00028A6C VGT_GS_OUT_PRIM_TYPE
5010x00028A84 VGT_PRIMITIVEID_EN
5020x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
5030x00028AA0 VGT_INSTANCE_STEP_RATE_0
5040x00028AA4 VGT_INSTANCE_STEP_RATE_1
5050x00028AB4 VGT_REUSE_OFF
5060x00028AB8 VGT_VTX_CNT_EN
5070x00028ABC DB_HTILE_SURFACE
5080x00028AC0 DB_SRESULTS_COMPARE_STATE0
5090x00028AC4 DB_SRESULTS_COMPARE_STATE1
5100x00028AC8 DB_PRELOAD_CONTROL
5110x00028B38 VGT_GS_MAX_VERT_OUT
5120x00028B54 VGT_SHADER_STAGES_EN
5130x00028B58 VGT_LS_HS_CONFIG
5140x00028B5C VGT_LS_SIZE
5150x00028B60 VGT_HS_SIZE
5160x00028B64 VGT_LS_HS_ALLOC
5170x00028B68 VGT_HS_PATCH_CONST
5180x00028B6C VGT_TF_PARAM
5190x00028B70 DB_ALPHA_TO_MASK
5200x00028B74 VGT_DISPATCH_INITIATOR
5210x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
5220x00028B7C PA_SU_POLY_OFFSET_CLAMP
5230x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
5240x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
5250x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
5260x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
5270x00028B74 VGT_GS_INSTANCE_CNT
5280x00028C00 PA_SC_LINE_CNTL
5290x00028C08 PA_SU_VTX_CNTL
5300x00028C0C PA_CL_GB_VERT_CLIP_ADJ
5310x00028C10 PA_CL_GB_VERT_DISC_ADJ
5320x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
5330x00028C18 PA_CL_GB_HORZ_DISC_ADJ
5340x00028C1C PA_SC_AA_SAMPLE_LOCS_0
5350x00028C20 PA_SC_AA_SAMPLE_LOCS_1
5360x00028C24 PA_SC_AA_SAMPLE_LOCS_2
5370x00028C28 PA_SC_AA_SAMPLE_LOCS_3
5380x00028C2C PA_SC_AA_SAMPLE_LOCS_4
5390x00028C30 PA_SC_AA_SAMPLE_LOCS_5
5400x00028C34 PA_SC_AA_SAMPLE_LOCS_6
5410x00028C38 PA_SC_AA_SAMPLE_LOCS_7
5420x00028C3C PA_SC_AA_MASK
5430x00028C8C CB_COLOR0_CLEAR_WORD0
5440x00028C90 CB_COLOR0_CLEAR_WORD1
5450x00028C94 CB_COLOR0_CLEAR_WORD2
5460x00028C98 CB_COLOR0_CLEAR_WORD3
5470x00028CC8 CB_COLOR1_CLEAR_WORD0
5480x00028CCC CB_COLOR1_CLEAR_WORD1
5490x00028CD0 CB_COLOR1_CLEAR_WORD2
5500x00028CD4 CB_COLOR1_CLEAR_WORD3
5510x00028D04 CB_COLOR2_CLEAR_WORD0
5520x00028D08 CB_COLOR2_CLEAR_WORD1
5530x00028D0C CB_COLOR2_CLEAR_WORD2
5540x00028D10 CB_COLOR2_CLEAR_WORD3
5550x00028D40 CB_COLOR3_CLEAR_WORD0
5560x00028D44 CB_COLOR3_CLEAR_WORD1
5570x00028D48 CB_COLOR3_CLEAR_WORD2
5580x00028D4C CB_COLOR3_CLEAR_WORD3
5590x00028D7C CB_COLOR4_CLEAR_WORD0
5600x00028D80 CB_COLOR4_CLEAR_WORD1
5610x00028D84 CB_COLOR4_CLEAR_WORD2
5620x00028D88 CB_COLOR4_CLEAR_WORD3
5630x00028DB8 CB_COLOR5_CLEAR_WORD0
5640x00028DBC CB_COLOR5_CLEAR_WORD1
5650x00028DC0 CB_COLOR5_CLEAR_WORD2
5660x00028DC4 CB_COLOR5_CLEAR_WORD3
5670x00028DF4 CB_COLOR6_CLEAR_WORD0
5680x00028DF8 CB_COLOR6_CLEAR_WORD1
5690x00028DFC CB_COLOR6_CLEAR_WORD2
5700x00028E00 CB_COLOR6_CLEAR_WORD3
5710x00028E30 CB_COLOR7_CLEAR_WORD0
5720x00028E34 CB_COLOR7_CLEAR_WORD1
5730x00028E38 CB_COLOR7_CLEAR_WORD2
5740x00028E3C CB_COLOR7_CLEAR_WORD3
5750x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
5760x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
5770x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
5780x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
5790x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
5800x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
5810x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
5820x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
5830x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
5840x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
5850x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
5860x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
5870x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
5880x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
5890x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
5900x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
5910x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
5920x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
5930x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
5940x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
5950x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
5960x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
5970x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
5980x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
5990x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
6000x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
6010x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
6020x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
6030x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
6040x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
6050x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
6060x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
6070x0003CFF0 SQ_VTX_BASE_VTX_LOC
6080x0003CFF4 SQ_VTX_START_INST_LOC
6090x0003FF00 SQ_TEX_SAMPLER_CLEAR
6100x0003FF04 SQ_TEX_RESOURCE_CLEAR
6110x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 79887cac5b54..7bb4c3e52f3b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev)
74 if (voltage->delay) 74 if (voltage->delay)
75 udelay(voltage->delay); 75 udelay(voltage->delay);
76 } 76 }
77 } 77 } else if (voltage->type == VOLTAGE_VDDC)
78 radeon_atom_set_voltage(rdev, voltage->vddc_id);
78 79
79 dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); 80 dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
80 dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); 81 dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 253f24aec031..33952da65340 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -44,7 +44,12 @@ void rv770_fini(struct radeon_device *rdev);
44 44
45void rv770_pm_misc(struct radeon_device *rdev) 45void rv770_pm_misc(struct radeon_device *rdev)
46{ 46{
47 int requested_index = rdev->pm.requested_power_state_index;
48 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
49 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
47 50
51 if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
52 radeon_atom_set_voltage(rdev, voltage->voltage);
48} 53}
49 54
50/* 55/*
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c2394f..ef910694bd63 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
77/** 77/**
78 * Limits for the pool. They are handled without locks because only place where 78 * Limits for the pool. They are handled without locks because only place where
79 * they may change is in sysfs store. They won't have immediate effect anyway 79 * they may change is in sysfs store. They won't have immediate effect anyway
80 * so forcing serialiazation to access them is pointless. 80 * so forcing serialization to access them is pointless.
81 */ 81 */
82 82
83struct ttm_pool_opts { 83struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
165 m->options.small = val; 165 m->options.small = val;
166 else if (attr == &ttm_page_pool_alloc_size) { 166 else if (attr == &ttm_page_pool_alloc_size) {
167 if (val > NUM_PAGES_TO_ALLOC*8) { 167 if (val > NUM_PAGES_TO_ALLOC*8) {
168 printk(KERN_ERR "[ttm] Setting allocation size to %lu " 168 printk(KERN_ERR TTM_PFX
169 "is not allowed. Recomended size is " 169 "Setting allocation size to %lu "
170 "%lu\n", 170 "is not allowed. Recommended size is "
171 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), 171 "%lu\n",
172 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); 172 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
173 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
173 return size; 174 return size;
174 } else if (val > NUM_PAGES_TO_ALLOC) { 175 } else if (val > NUM_PAGES_TO_ALLOC) {
175 printk(KERN_WARNING "[ttm] Setting allocation size to " 176 printk(KERN_WARNING TTM_PFX
176 "larger than %lu is not recomended.\n", 177 "Setting allocation size to "
177 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); 178 "larger than %lu is not recommended.\n",
179 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
178 } 180 }
179 m->options.alloc_size = val; 181 m->options.alloc_size = val;
180 } 182 }
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
277{ 279{
278 unsigned i; 280 unsigned i;
279 if (set_pages_array_wb(pages, npages)) 281 if (set_pages_array_wb(pages, npages))
280 printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n", 282 printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
281 npages); 283 npages);
282 for (i = 0; i < npages; ++i) 284 for (i = 0; i < npages; ++i)
283 __free_page(pages[i]); 285 __free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
313 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), 315 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
314 GFP_KERNEL); 316 GFP_KERNEL);
315 if (!pages_to_free) { 317 if (!pages_to_free) {
316 printk(KERN_ERR "Failed to allocate memory for pool free operation.\n"); 318 printk(KERN_ERR TTM_PFX
319 "Failed to allocate memory for pool free operation.\n");
317 return 0; 320 return 0;
318 } 321 }
319 322
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
390} 393}
391 394
392/** 395/**
393 * Calback for mm to request pool to reduce number of page held. 396 * Callback for mm to request pool to reduce number of page held.
394 */ 397 */
395static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) 398static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
396{ 399{
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
433 case tt_uncached: 436 case tt_uncached:
434 r = set_pages_array_uc(pages, cpages); 437 r = set_pages_array_uc(pages, cpages);
435 if (r) 438 if (r)
436 printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n", 439 printk(KERN_ERR TTM_PFX
437 cpages); 440 "Failed to set %d pages to uc!\n",
441 cpages);
438 break; 442 break;
439 case tt_wc: 443 case tt_wc:
440 r = set_pages_array_wc(pages, cpages); 444 r = set_pages_array_wc(pages, cpages);
441 if (r) 445 if (r)
442 printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n", 446 printk(KERN_ERR TTM_PFX
443 cpages); 447 "Failed to set %d pages to wc!\n",
448 cpages);
444 break; 449 break;
445 default: 450 default:
446 break; 451 break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
458 struct page **failed_pages, unsigned cpages) 463 struct page **failed_pages, unsigned cpages)
459{ 464{
460 unsigned i; 465 unsigned i;
461 /* Failed pages has to be reed */ 466 /* Failed pages have to be freed */
462 for (i = 0; i < cpages; ++i) { 467 for (i = 0; i < cpages; ++i) {
463 list_del(&failed_pages[i]->lru); 468 list_del(&failed_pages[i]->lru);
464 __free_page(failed_pages[i]); 469 __free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
485 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); 490 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
486 491
487 if (!caching_array) { 492 if (!caching_array) {
488 printk(KERN_ERR "[ttm] unable to allocate table for new pages."); 493 printk(KERN_ERR TTM_PFX
494 "Unable to allocate table for new pages.");
489 return -ENOMEM; 495 return -ENOMEM;
490 } 496 }
491 497
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
493 p = alloc_page(gfp_flags); 499 p = alloc_page(gfp_flags);
494 500
495 if (!p) { 501 if (!p) {
496 printk(KERN_ERR "[ttm] unable to get page %u\n", i); 502 printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
497 503
498 /* store already allocated pages in the pool after 504 /* store already allocated pages in the pool after
499 * setting the caching state */ 505 * setting the caching state */
500 if (cpages) { 506 if (cpages) {
501 r = ttm_set_pages_caching(caching_array, cstate, cpages); 507 r = ttm_set_pages_caching(caching_array,
508 cstate, cpages);
502 if (r) 509 if (r)
503 ttm_handle_caching_state_failure(pages, 510 ttm_handle_caching_state_failure(pages,
504 ttm_flags, cstate, 511 ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
590 ++pool->nrefills; 597 ++pool->nrefills;
591 pool->npages += alloc_size; 598 pool->npages += alloc_size;
592 } else { 599 } else {
593 printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool); 600 printk(KERN_ERR TTM_PFX
601 "Failed to fill pool (%p).", pool);
594 /* If we have any pages left put them to the pool. */ 602 /* If we have any pages left put them to the pool. */
595 list_for_each_entry(p, &pool->list, lru) { 603 list_for_each_entry(p, &pool->list, lru) {
596 ++cpages; 604 ++cpages;
@@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
671 if (flags & TTM_PAGE_FLAG_DMA32) 679 if (flags & TTM_PAGE_FLAG_DMA32)
672 gfp_flags |= GFP_DMA32; 680 gfp_flags |= GFP_DMA32;
673 else 681 else
674 gfp_flags |= __GFP_HIGHMEM; 682 gfp_flags |= GFP_HIGHUSER;
675 683
676 for (r = 0; r < count; ++r) { 684 for (r = 0; r < count; ++r) {
677 p = alloc_page(gfp_flags); 685 p = alloc_page(gfp_flags);
678 if (!p) { 686 if (!p) {
679 687
680 printk(KERN_ERR "[ttm] unable to allocate page."); 688 printk(KERN_ERR TTM_PFX
689 "Unable to allocate page.");
681 return -ENOMEM; 690 return -ENOMEM;
682 } 691 }
683 692
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
709 if (r) { 718 if (r) {
710 /* If there is any pages in the list put them back to 719 /* If there is any pages in the list put them back to
711 * the pool. */ 720 * the pool. */
712 printk(KERN_ERR "[ttm] Failed to allocate extra pages " 721 printk(KERN_ERR TTM_PFX
713 "for large request."); 722 "Failed to allocate extra pages "
723 "for large request.");
714 ttm_put_pages(pages, 0, flags, cstate); 724 ttm_put_pages(pages, 0, flags, cstate);
715 return r; 725 return r;
716 } 726 }
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
778 if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) 788 if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
779 return 0; 789 return 0;
780 790
781 printk(KERN_INFO "[ttm] Initializing pool allocator.\n"); 791 printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
782 792
783 ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); 793 ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
784 794
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
813 if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) 823 if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
814 return; 824 return;
815 825
816 printk(KERN_INFO "[ttm] Finilizing pool allocator.\n"); 826 printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
817 ttm_pool_mm_shrink_fini(&_manager); 827 ttm_pool_mm_shrink_fini(&_manager);
818 828
819 for (i = 0; i < NUM_POOLS; ++i) 829 for (i = 0; i < NUM_POOLS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb6816d1c..4505e17df3f5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
4vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ 4vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ 5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o 7 vmwgfx_overlay.o vmwgfx_fence.o
8 8
9obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 9obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c0811f42d..b793c8c9acb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -88,6 +88,9 @@
88#define DRM_IOCTL_VMW_FENCE_WAIT \ 88#define DRM_IOCTL_VMW_FENCE_WAIT \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
90 struct drm_vmw_fence_wait_arg) 90 struct drm_vmw_fence_wait_arg)
91#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
92 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
93 struct drm_vmw_update_layout_arg)
91 94
92 95
93/** 96/**
@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
135 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, 138 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
136 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), 139 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
137 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, 140 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
138 DRM_AUTH | DRM_UNLOCKED) 141 DRM_AUTH | DRM_UNLOCKED),
142 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
143 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
139}; 144};
140 145
141static struct pci_device_id vmw_pci_id_list[] = { 146static struct pci_device_id vmw_pci_id_list[] = {
@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
318 goto out_err3; 323 goto out_err3;
319 } 324 }
320 325
326 /* Need mmio memory to check for fifo pitchlock cap. */
327 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
328 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
329 !vmw_fifo_have_pitchlock(dev_priv)) {
330 ret = -ENOSYS;
331 DRM_ERROR("Hardware has no pitchlock\n");
332 goto out_err4;
333 }
334
321 dev_priv->tdev = ttm_object_device_init 335 dev_priv->tdev = ttm_object_device_init
322 (dev_priv->mem_global_ref.object, 12); 336 (dev_priv->mem_global_ref.object, 12);
323 337
@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev)
399{ 413{
400 struct vmw_private *dev_priv = vmw_priv(dev); 414 struct vmw_private *dev_priv = vmw_priv(dev);
401 415
402 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
403
404 unregister_pm_notifier(&dev_priv->pm_nb); 416 unregister_pm_notifier(&dev_priv->pm_nb);
405 417
406 vmw_fb_close(dev_priv); 418 vmw_fb_close(dev_priv);
@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev,
546{ 558{
547 struct vmw_master *vmaster; 559 struct vmw_master *vmaster;
548 560
549 DRM_INFO("Master create.\n");
550 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); 561 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
551 if (unlikely(vmaster == NULL)) 562 if (unlikely(vmaster == NULL))
552 return -ENOMEM; 563 return -ENOMEM;
@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev,
563{ 574{
564 struct vmw_master *vmaster = vmw_master(master); 575 struct vmw_master *vmaster = vmw_master(master);
565 576
566 DRM_INFO("Master destroy.\n");
567 master->driver_priv = NULL; 577 master->driver_priv = NULL;
568 kfree(vmaster); 578 kfree(vmaster);
569} 579}
@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev,
579 struct vmw_master *vmaster = vmw_master(file_priv->master); 589 struct vmw_master *vmaster = vmw_master(file_priv->master);
580 int ret = 0; 590 int ret = 0;
581 591
582 DRM_INFO("Master set.\n");
583
584 if (active) { 592 if (active) {
585 BUG_ON(active != &dev_priv->fbdev_master); 593 BUG_ON(active != &dev_priv->fbdev_master);
586 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 594 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev,
622 struct vmw_master *vmaster = vmw_master(file_priv->master); 630 struct vmw_master *vmaster = vmw_master(file_priv->master);
623 int ret; 631 int ret;
624 632
625 DRM_INFO("Master drop.\n");
626
627 /** 633 /**
628 * Make sure the master doesn't disappear while we have 634 * Make sure the master doesn't disappear while we have
629 * it locked. 635 * it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc935ec13..eaad52095339 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,12 +41,13 @@
41 41
42#define VMWGFX_DRIVER_DATE "20100209" 42#define VMWGFX_DRIVER_DATE "20100209"
43#define VMWGFX_DRIVER_MAJOR 1 43#define VMWGFX_DRIVER_MAJOR 1
44#define VMWGFX_DRIVER_MINOR 0 44#define VMWGFX_DRIVER_MINOR 2
45#define VMWGFX_DRIVER_PATCHLEVEL 0 45#define VMWGFX_DRIVER_PATCHLEVEL 0
46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
48#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
49#define VMWGFX_MAX_GMRS 2048 49#define VMWGFX_MAX_GMRS 2048
50#define VMWGFX_MAX_DISPLAYS 16
50 51
51struct vmw_fpriv { 52struct vmw_fpriv {
52 struct drm_master *locked_master; 53 struct drm_master *locked_master;
@@ -102,6 +103,13 @@ struct vmw_surface {
102 struct vmw_cursor_snooper snooper; 103 struct vmw_cursor_snooper snooper;
103}; 104};
104 105
106struct vmw_fence_queue {
107 struct list_head head;
108 struct timespec lag;
109 struct timespec lag_time;
110 spinlock_t lock;
111};
112
105struct vmw_fifo_state { 113struct vmw_fifo_state {
106 unsigned long reserved_size; 114 unsigned long reserved_size;
107 __le32 *dynamic_buffer; 115 __le32 *dynamic_buffer;
@@ -115,6 +123,7 @@ struct vmw_fifo_state {
115 uint32_t capabilities; 123 uint32_t capabilities;
116 struct mutex fifo_mutex; 124 struct mutex fifo_mutex;
117 struct rw_semaphore rwsem; 125 struct rw_semaphore rwsem;
126 struct vmw_fence_queue fence_queue;
118}; 127};
119 128
120struct vmw_relocation { 129struct vmw_relocation {
@@ -144,6 +153,14 @@ struct vmw_master {
144 struct ttm_lock lock; 153 struct ttm_lock lock;
145}; 154};
146 155
156struct vmw_vga_topology_state {
157 uint32_t width;
158 uint32_t height;
159 uint32_t primary;
160 uint32_t pos_x;
161 uint32_t pos_y;
162};
163
147struct vmw_private { 164struct vmw_private {
148 struct ttm_bo_device bdev; 165 struct ttm_bo_device bdev;
149 struct ttm_bo_global_ref bo_global_ref; 166 struct ttm_bo_global_ref bo_global_ref;
@@ -171,14 +188,19 @@ struct vmw_private {
171 * VGA registers. 188 * VGA registers.
172 */ 189 */
173 190
191 struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
174 uint32_t vga_width; 192 uint32_t vga_width;
175 uint32_t vga_height; 193 uint32_t vga_height;
176 uint32_t vga_depth; 194 uint32_t vga_depth;
177 uint32_t vga_bpp; 195 uint32_t vga_bpp;
178 uint32_t vga_pseudo; 196 uint32_t vga_pseudo;
179 uint32_t vga_red_mask; 197 uint32_t vga_red_mask;
180 uint32_t vga_blue_mask;
181 uint32_t vga_green_mask; 198 uint32_t vga_green_mask;
199 uint32_t vga_blue_mask;
200 uint32_t vga_bpl;
201 uint32_t vga_pitchlock;
202
203 uint32_t num_displays;
182 204
183 /* 205 /*
184 * Framebuffer info. 206 * Framebuffer info.
@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
393extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); 415extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
394extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); 416extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
395extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); 417extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
418extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
396 419
397/** 420/**
398 * TTM glue - vmwgfx_ttm_glue.c 421 * TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
441 uint32_t sequence, 464 uint32_t sequence,
442 bool interruptible, 465 bool interruptible,
443 unsigned long timeout); 466 unsigned long timeout);
467extern void vmw_update_sequence(struct vmw_private *dev_priv,
468 struct vmw_fifo_state *fifo_state);
469
470
471/**
472 * Rudimentary fence objects currently used only for throttling -
473 * vmwgfx_fence.c
474 */
475
476extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
477extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
478extern int vmw_fence_push(struct vmw_fence_queue *queue,
479 uint32_t sequence);
480extern int vmw_fence_pull(struct vmw_fence_queue *queue,
481 uint32_t signaled_sequence);
482extern int vmw_wait_lag(struct vmw_private *dev_priv,
483 struct vmw_fence_queue *queue, uint32_t us);
444 484
445/** 485/**
446 * Kernel framebuffer - vmwgfx_fb.c 486 * Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
466 struct ttm_object_file *tfile, 506 struct ttm_object_file *tfile,
467 struct ttm_buffer_object *bo, 507 struct ttm_buffer_object *bo,
468 SVGA3dCmdHeader *header); 508 SVGA3dCmdHeader *header);
509void vmw_kms_write_svga(struct vmw_private *vmw_priv,
510 unsigned width, unsigned height, unsigned pitch,
511 unsigned bbp, unsigned depth);
512int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file_priv);
469 514
470/** 515/**
471 * Overlay control - vmwgfx_overlay.c 516 * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8910cf..bdd67cf83315 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
669 goto out_err; 669 goto out_err;
670 670
671 vmw_apply_relocations(sw_context); 671 vmw_apply_relocations(sw_context);
672
673 if (arg->throttle_us) {
674 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
675 arg->throttle_us);
676
677 if (unlikely(ret != 0))
678 goto out_err;
679 }
680
672 vmw_fifo_commit(dev_priv, arg->command_size); 681 vmw_fifo_commit(dev_priv, arg->command_size);
673 682
674 ret = vmw_fifo_send_fence(dev_priv, &sequence); 683 ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaad8d09..b0866f04ec76 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
132 return -EINVAL; 132 return -EINVAL;
133 } 133 }
134 134
135 /* without multimon its hard to resize */ 135 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
136 if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && 136 (var->xoffset != 0 || var->yoffset != 0)) {
137 (var->xres != par->max_width || 137 DRM_ERROR("Can not handle panning without display topology\n");
138 var->yres != par->max_height)) {
139 DRM_ERROR("Tried to resize, but we don't have multimon\n");
140 return -EINVAL; 138 return -EINVAL;
141 } 139 }
142 140
143 if (var->xres > par->max_width || 141 if ((var->xoffset + var->xres) > par->max_width ||
144 var->yres > par->max_height) { 142 (var->yoffset + var->yres) > par->max_height) {
145 DRM_ERROR("Requested geom can not fit in framebuffer\n"); 143 DRM_ERROR("Requested geom can not fit in framebuffer\n");
146 return -EINVAL; 144 return -EINVAL;
147 } 145 }
@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info)
154 struct vmw_fb_par *par = info->par; 152 struct vmw_fb_par *par = info->par;
155 struct vmw_private *vmw_priv = par->vmw_priv; 153 struct vmw_private *vmw_priv = par->vmw_priv;
156 154
157 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { 155 vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
158 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); 156 info->fix.line_length,
159 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); 157 par->bpp, par->depth);
160 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); 158 if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
161 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
162 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
163 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
164 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
165 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
166
167 vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
168 vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
169 vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
170 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
171 vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
172 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
173 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
174 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
175
176 /* TODO check if pitch and offset changes */ 159 /* TODO check if pitch and offset changes */
177
178 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); 160 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); 161 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); 162 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info)
183 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); 165 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
184 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); 166 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
185 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 167 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
186 } else {
187 vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
188 vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
189
190 /* TODO check if pitch and offset changes */
191 } 168 }
192 169
170 /* This is really helpful since if this fails the user
171 * can probably not see anything on the screen.
172 */
173 WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
174
193 return 0; 175 return 0;
194} 176}
195 177
@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
416 unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; 398 unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
417 int ret; 399 int ret;
418 400
401 /* XXX These shouldn't be hardcoded. */
419 initial_width = 800; 402 initial_width = 800;
420 initial_height = 600; 403 initial_height = 600;
421 404
422 fb_bbp = 32; 405 fb_bbp = 32;
423 fb_depth = 24; 406 fb_depth = 24;
424 407
425 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { 408 /* XXX As shouldn't these be as well. */
426 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); 409 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
427 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); 410 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
428 } else {
429 fb_width = min(vmw_priv->fb_max_width, initial_width);
430 fb_height = min(vmw_priv->fb_max_height, initial_height);
431 }
432 411
433 initial_width = min(fb_width, initial_width); 412 initial_width = min(fb_width, initial_width);
434 initial_height = min(fb_height, initial_height); 413 initial_height = min(fb_height, initial_height);
435 414
436 vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); 415 fb_pitch = fb_width * fb_bbp / 8;
437 vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); 416 fb_size = fb_pitch * fb_height;
438 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
439 vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
440 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
441 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
442 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
443
444 fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
445 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); 417 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
446 fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
447
448 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
449 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
450 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
451 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
452 DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
453 DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
454 DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
455 DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
456 DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
457 DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
458 DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
459 DRM_DEBUG("fb_pitch %u\n", fb_pitch);
460 DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
461 418
462 info = framebuffer_alloc(sizeof(*par), device); 419 info = framebuffer_alloc(sizeof(*par), device);
463 if (!info) 420 if (!info)
@@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
659 goto err_unlock; 616 goto err_unlock;
660 617
661 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 618 ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
619
620 /* Could probably bug on */
621 WARN_ON(bo->offset != 0);
622
662 ttm_bo_unreserve(bo); 623 ttm_bo_unreserve(bo);
663err_unlock: 624err_unlock:
664 ttm_write_unlock(&vmw_priv->active_master->lock); 625 ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 000000000000..61eacc1b5ca3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
1/**************************************************************************
2 *
3 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "vmwgfx_drv.h"
30
31struct vmw_fence {
32 struct list_head head;
33 uint32_t sequence;
34 struct timespec submitted;
35};
36
37void vmw_fence_queue_init(struct vmw_fence_queue *queue)
38{
39 INIT_LIST_HEAD(&queue->head);
40 queue->lag = ns_to_timespec(0);
41 getrawmonotonic(&queue->lag_time);
42 spin_lock_init(&queue->lock);
43}
44
45void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
46{
47 struct vmw_fence *fence, *next;
48
49 spin_lock(&queue->lock);
50 list_for_each_entry_safe(fence, next, &queue->head, head) {
51 kfree(fence);
52 }
53 spin_unlock(&queue->lock);
54}
55
56int vmw_fence_push(struct vmw_fence_queue *queue,
57 uint32_t sequence)
58{
59 struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
60
61 if (unlikely(!fence))
62 return -ENOMEM;
63
64 fence->sequence = sequence;
65 getrawmonotonic(&fence->submitted);
66 spin_lock(&queue->lock);
67 list_add_tail(&fence->head, &queue->head);
68 spin_unlock(&queue->lock);
69
70 return 0;
71}
72
73int vmw_fence_pull(struct vmw_fence_queue *queue,
74 uint32_t signaled_sequence)
75{
76 struct vmw_fence *fence, *next;
77 struct timespec now;
78 bool updated = false;
79
80 spin_lock(&queue->lock);
81 getrawmonotonic(&now);
82
83 if (list_empty(&queue->head)) {
84 queue->lag = ns_to_timespec(0);
85 queue->lag_time = now;
86 updated = true;
87 goto out_unlock;
88 }
89
90 list_for_each_entry_safe(fence, next, &queue->head, head) {
91 if (signaled_sequence - fence->sequence > (1 << 30))
92 continue;
93
94 queue->lag = timespec_sub(now, fence->submitted);
95 queue->lag_time = now;
96 updated = true;
97 list_del(&fence->head);
98 kfree(fence);
99 }
100
101out_unlock:
102 spin_unlock(&queue->lock);
103
104 return (updated) ? 0 : -EBUSY;
105}
106
107static struct timespec vmw_timespec_add(struct timespec t1,
108 struct timespec t2)
109{
110 t1.tv_sec += t2.tv_sec;
111 t1.tv_nsec += t2.tv_nsec;
112 if (t1.tv_nsec >= 1000000000L) {
113 t1.tv_sec += 1;
114 t1.tv_nsec -= 1000000000L;
115 }
116
117 return t1;
118}
119
120static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
121{
122 struct timespec now;
123
124 spin_lock(&queue->lock);
125 getrawmonotonic(&now);
126 queue->lag = vmw_timespec_add(queue->lag,
127 timespec_sub(now, queue->lag_time));
128 queue->lag_time = now;
129 spin_unlock(&queue->lock);
130 return queue->lag;
131}
132
133
134static bool vmw_lag_lt(struct vmw_fence_queue *queue,
135 uint32_t us)
136{
137 struct timespec lag, cond;
138
139 cond = ns_to_timespec((s64) us * 1000);
140 lag = vmw_fifo_lag(queue);
141 return (timespec_compare(&lag, &cond) < 1);
142}
143
144int vmw_wait_lag(struct vmw_private *dev_priv,
145 struct vmw_fence_queue *queue, uint32_t us)
146{
147 struct vmw_fence *fence;
148 uint32_t sequence;
149 int ret;
150
151 while (!vmw_lag_lt(queue, us)) {
152 spin_lock(&queue->lock);
153 if (list_empty(&queue->head))
154 sequence = atomic_read(&dev_priv->fence_seq);
155 else {
156 fence = list_first_entry(&queue->head,
157 struct vmw_fence, head);
158 sequence = fence->sequence;
159 }
160 spin_unlock(&queue->lock);
161
162 ret = vmw_wait_fence(dev_priv, false, sequence, true,
163 3*HZ);
164
165 if (unlikely(ret != 0))
166 return ret;
167
168 (void) vmw_fence_pull(queue, sequence);
169 }
170 return 0;
171}
172
173
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a01d846..e6a1eb7ea954 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t fifo_min, hwversion; 35 uint32_t fifo_min, hwversion;
36 36
37 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
38 return false;
39
37 fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); 40 fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
38 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) 41 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
39 return false; 42 return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
48 return true; 51 return true;
49} 52}
50 53
54bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
55{
56 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
57 uint32_t caps;
58
59 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
60 return false;
61
62 caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
63 if (caps & SVGA_FIFO_CAP_PITCHLOCK)
64 return true;
65
66 return false;
67}
68
51int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 69int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
52{ 70{
53 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 71 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
120 138
121 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); 139 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
122 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 140 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
123 141 vmw_fence_queue_init(&fifo->fence_queue);
124 return vmw_fifo_send_fence(dev_priv, &dummy); 142 return vmw_fifo_send_fence(dev_priv, &dummy);
125out_err: 143out_err:
126 vfree(fifo->static_buffer); 144 vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
159 dev_priv->enable_state); 177 dev_priv->enable_state);
160 178
161 mutex_unlock(&dev_priv->hw_mutex); 179 mutex_unlock(&dev_priv->hw_mutex);
180 vmw_fence_queue_takedown(&fifo->fence_queue);
162 181
163 if (likely(fifo->last_buffer != NULL)) { 182 if (likely(fifo->last_buffer != NULL)) {
164 vfree(fifo->last_buffer); 183 vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
484 fifo_state->last_buffer_add = true; 503 fifo_state->last_buffer_add = true;
485 vmw_fifo_commit(dev_priv, bytes); 504 vmw_fifo_commit(dev_priv, bytes);
486 fifo_state->last_buffer_add = false; 505 fifo_state->last_buffer_add = false;
506 (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
507 vmw_update_sequence(dev_priv, fifo_state);
487 508
488out_err: 509out_err:
489 return ret; 510 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb5393860..e92298a6a383 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
64 return (busy == 0); 64 return (busy == 0);
65} 65}
66 66
67void vmw_update_sequence(struct vmw_private *dev_priv,
68 struct vmw_fifo_state *fifo_state)
69{
70 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
71
72 uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
73
74 if (dev_priv->last_read_sequence != sequence) {
75 dev_priv->last_read_sequence = sequence;
76 vmw_fence_pull(&fifo_state->fence_queue, sequence);
77 }
78}
67 79
68bool vmw_fence_signaled(struct vmw_private *dev_priv, 80bool vmw_fence_signaled(struct vmw_private *dev_priv,
69 uint32_t sequence) 81 uint32_t sequence)
70{ 82{
71 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
72 struct vmw_fifo_state *fifo_state; 83 struct vmw_fifo_state *fifo_state;
73 bool ret; 84 bool ret;
74 85
75 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) 86 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
76 return true; 87 return true;
77 88
78 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); 89 fifo_state = &dev_priv->fifo;
90 vmw_update_sequence(dev_priv, fifo_state);
79 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) 91 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
80 return true; 92 return true;
81 93
82 fifo_state = &dev_priv->fifo;
83 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && 94 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
84 vmw_fifo_idle(dev_priv, sequence)) 95 vmw_fifo_idle(dev_priv, sequence))
85 return true; 96 return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c30bc7..f1d626112415 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
30/* Might need a hrtimer here? */ 30/* Might need a hrtimer here? */
31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
32 32
33static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
34static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
33 35
34void vmw_display_unit_cleanup(struct vmw_display_unit *du) 36void vmw_display_unit_cleanup(struct vmw_display_unit *du)
35{ 37{
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
326struct vmw_framebuffer_surface { 328struct vmw_framebuffer_surface {
327 struct vmw_framebuffer base; 329 struct vmw_framebuffer base;
328 struct vmw_surface *surface; 330 struct vmw_surface *surface;
331 struct vmw_dma_buffer *buffer;
329 struct delayed_work d_work; 332 struct delayed_work d_work;
330 struct mutex work_lock; 333 struct mutex work_lock;
331 bool present_fs; 334 bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
500 vfbs->base.base.depth = 24; 503 vfbs->base.base.depth = 24;
501 vfbs->base.base.width = width; 504 vfbs->base.base.width = width;
502 vfbs->base.base.height = height; 505 vfbs->base.base.height = height;
503 vfbs->base.pin = NULL; 506 vfbs->base.pin = &vmw_surface_dmabuf_pin;
504 vfbs->base.unpin = NULL; 507 vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
505 vfbs->surface = surface; 508 vfbs->surface = surface;
506 mutex_init(&vfbs->work_lock); 509 mutex_init(&vfbs->work_lock);
507 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); 510 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
589 .create_handle = vmw_framebuffer_create_handle, 592 .create_handle = vmw_framebuffer_create_handle,
590}; 593};
591 594
595static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
596{
597 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
598 struct vmw_framebuffer_surface *vfbs =
599 vmw_framebuffer_to_vfbs(&vfb->base);
600 unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
601 int ret;
602
603 vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
604 if (unlikely(vfbs->buffer == NULL))
605 return -ENOMEM;
606
607 vmw_overlay_pause_all(dev_priv);
608 ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
609 &vmw_vram_ne_placement,
610 false, &vmw_dmabuf_bo_free);
611 vmw_overlay_resume_all(dev_priv);
612
613 return ret;
614}
615
616static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
617{
618 struct ttm_buffer_object *bo;
619 struct vmw_framebuffer_surface *vfbs =
620 vmw_framebuffer_to_vfbs(&vfb->base);
621
622 bo = &vfbs->buffer->base;
623 ttm_bo_unref(&bo);
624 vfbs->buffer = NULL;
625
626 return 0;
627}
628
592static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) 629static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
593{ 630{
594 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 631 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
596 vmw_framebuffer_to_vfbd(&vfb->base); 633 vmw_framebuffer_to_vfbd(&vfb->base);
597 int ret; 634 int ret;
598 635
636
599 vmw_overlay_pause_all(dev_priv); 637 vmw_overlay_pause_all(dev_priv);
600 638
601 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); 639 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
602 640
603 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
604 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
605 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
606 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
607 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
608 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
609 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
610 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
611 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
612
613 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
614 vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
615 vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
616 vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
617 vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
618 vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
619 vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
620 vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
621 } else
622 WARN_ON(true);
623
624 vmw_overlay_resume_all(dev_priv); 641 vmw_overlay_resume_all(dev_priv);
625 642
643 WARN_ON(ret != 0);
644
626 return 0; 645 return 0;
627} 646}
628 647
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
668 687
669 /* XXX get the first 3 from the surface info */ 688 /* XXX get the first 3 from the surface info */
670 vfbd->base.base.bits_per_pixel = 32; 689 vfbd->base.base.bits_per_pixel = 32;
671 vfbd->base.base.pitch = width * 32 / 4; 690 vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
672 vfbd->base.base.depth = 24; 691 vfbd->base.base.depth = 24;
673 vfbd->base.base.width = width; 692 vfbd->base.base.width = width;
674 vfbd->base.base.height = height; 693 vfbd->base.base.height = height;
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
765 dev->mode_config.funcs = &vmw_kms_funcs; 784 dev->mode_config.funcs = &vmw_kms_funcs;
766 dev->mode_config.min_width = 1; 785 dev->mode_config.min_width = 1;
767 dev->mode_config.min_height = 1; 786 dev->mode_config.min_height = 1;
768 dev->mode_config.max_width = dev_priv->fb_max_width; 787 /* assumed largest fb size */
769 dev->mode_config.max_height = dev_priv->fb_max_height; 788 dev->mode_config.max_width = 8192;
789 dev->mode_config.max_height = 8192;
770 790
771 ret = vmw_kms_init_legacy_display_system(dev_priv); 791 ret = vmw_kms_init_legacy_display_system(dev_priv);
772 792
@@ -826,49 +846,140 @@ out:
826 return ret; 846 return ret;
827} 847}
828 848
849void vmw_kms_write_svga(struct vmw_private *vmw_priv,
850 unsigned width, unsigned height, unsigned pitch,
851 unsigned bbp, unsigned depth)
852{
853 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
854 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
855 else if (vmw_fifo_have_pitchlock(vmw_priv))
856 iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
857 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
858 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
859 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
860 vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
861 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
862 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
863 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
864}
865
829int vmw_kms_save_vga(struct vmw_private *vmw_priv) 866int vmw_kms_save_vga(struct vmw_private *vmw_priv)
830{ 867{
831 /* 868 struct vmw_vga_topology_state *save;
832 * setup a single multimon monitor with the size 869 uint32_t i;
833 * of 0x0, this stops the UI from resizing when we
834 * change the framebuffer size
835 */
836 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
837 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
838 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
839 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
840 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
841 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
842 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
843 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
844 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
845 }
846 870
847 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); 871 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
848 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); 872 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
849 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
850 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); 873 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
874 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
851 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); 875 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
852 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); 876 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
853 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
854 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); 877 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
878 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
879 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
880 vmw_priv->vga_pitchlock =
881 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
882 else if (vmw_fifo_have_pitchlock(vmw_priv))
883 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
884 SVGA_FIFO_PITCHLOCK);
885
886 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
887 return 0;
855 888
889 vmw_priv->num_displays = vmw_read(vmw_priv,
890 SVGA_REG_NUM_GUEST_DISPLAYS);
891
892 for (i = 0; i < vmw_priv->num_displays; ++i) {
893 save = &vmw_priv->vga_save[i];
894 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
895 save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
896 save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
897 save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
901 }
856 return 0; 902 return 0;
857} 903}
858 904
859int vmw_kms_restore_vga(struct vmw_private *vmw_priv) 905int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
860{ 906{
907 struct vmw_vga_topology_state *save;
908 uint32_t i;
909
861 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); 910 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
862 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); 911 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
863 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
864 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); 912 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
913 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
865 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); 914 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
866 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); 915 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
867 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); 916 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
868 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); 917 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
918 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
919 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
920 vmw_priv->vga_pitchlock);
921 else if (vmw_fifo_have_pitchlock(vmw_priv))
922 iowrite32(vmw_priv->vga_pitchlock,
923 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
924
925 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
926 return 0;
869 927
870 /* TODO check for multimon */ 928 for (i = 0; i < vmw_priv->num_displays; ++i) {
871 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); 929 save = &vmw_priv->vga_save[i];
930 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
931 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
932 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
933 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
934 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
935 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
936 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
937 }
872 938
873 return 0; 939 return 0;
874} 940}
941
942int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
943 struct drm_file *file_priv)
944{
945 struct vmw_private *dev_priv = vmw_priv(dev);
946 struct drm_vmw_update_layout_arg *arg =
947 (struct drm_vmw_update_layout_arg *)data;
948 struct vmw_master *vmaster = vmw_master(file_priv->master);
949 void __user *user_rects;
950 struct drm_vmw_rect *rects;
951 unsigned rects_size;
952 int ret;
953
954 ret = ttm_read_lock(&vmaster->lock, true);
955 if (unlikely(ret != 0))
956 return ret;
957
958 if (!arg->num_outputs) {
959 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
960 vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
961 goto out_unlock;
962 }
963
964 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
965 rects = kzalloc(rects_size, GFP_KERNEL);
966 if (unlikely(!rects)) {
967 ret = -ENOMEM;
968 goto out_unlock;
969 }
970
971 user_rects = (void __user *)(unsigned long)arg->rects;
972 ret = copy_from_user(rects, user_rects, rects_size);
973 if (unlikely(ret != 0)) {
974 DRM_ERROR("Failed to get rects.\n");
975 goto out_free;
976 }
977
978 vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
979
980out_free:
981 kfree(rects);
982out_unlock:
983 ttm_read_unlock(&vmaster->lock);
984 return ret;
985}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8b95249f0531..8a398a0339b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
95 95
96/* 96/*
97 * Legacy display unit functions - vmwgfx_ldu.h 97 * Legacy display unit functions - vmwgfx_ldu.c
98 */ 98 */
99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); 99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); 100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
101int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
102 struct drm_vmw_rect *rects);
101 103
102#endif 104#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90891593bf6c..cfaf690a5b2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
38 struct list_head active; 38 struct list_head active;
39 39
40 unsigned num_active; 40 unsigned num_active;
41 unsigned last_num_active;
41 42
42 struct vmw_framebuffer *fb; 43 struct vmw_framebuffer *fb;
43}; 44};
@@ -48,9 +49,12 @@ struct vmw_legacy_display {
48struct vmw_legacy_display_unit { 49struct vmw_legacy_display_unit {
49 struct vmw_display_unit base; 50 struct vmw_display_unit base;
50 51
51 struct list_head active; 52 unsigned pref_width;
53 unsigned pref_height;
54 bool pref_active;
55 struct drm_display_mode *pref_mode;
52 56
53 unsigned unit; 57 struct list_head active;
54}; 58};
55 59
56static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) 60static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
88{ 92{
89 struct vmw_legacy_display *lds = dev_priv->ldu_priv; 93 struct vmw_legacy_display *lds = dev_priv->ldu_priv;
90 struct vmw_legacy_display_unit *entry; 94 struct vmw_legacy_display_unit *entry;
91 struct drm_crtc *crtc; 95 struct drm_framebuffer *fb = NULL;
96 struct drm_crtc *crtc = NULL;
92 int i = 0; 97 int i = 0;
93 98
94 /* to stop the screen from changing size on resize */ 99 /* If there is no display topology the host just assumes
95 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); 100 * that the guest will set the same layout as the host.
96 for (i = 0; i < lds->num_active; i++) { 101 */
97 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); 102 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
98 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); 103 int w = 0, h = 0;
99 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); 104 list_for_each_entry(entry, &lds->active, active) {
100 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); 105 crtc = &entry->base.crtc;
101 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); 106 w = max(w, crtc->x + crtc->mode.hdisplay);
102 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); 107 h = max(h, crtc->y + crtc->mode.vdisplay);
103 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 108 i++;
109 }
110
111 if (crtc == NULL)
112 return 0;
113 fb = entry->base.crtc.fb;
114
115 vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
116 fb->bits_per_pixel, fb->depth);
117
118 return 0;
104 } 119 }
105 120
106 /* Now set the mode */ 121 if (!list_empty(&lds->active)) {
107 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); 122 entry = list_entry(lds->active.next, typeof(*entry), active);
123 fb = entry->base.crtc.fb;
124
125 vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
126 fb->bits_per_pixel, fb->depth);
127 }
128
129 /* Make sure we always show something. */
130 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
131 lds->num_active ? lds->num_active : 1);
132
108 i = 0; 133 i = 0;
109 list_for_each_entry(entry, &lds->active, active) { 134 list_for_each_entry(entry, &lds->active, active) {
110 crtc = &entry->base.crtc; 135 crtc = &entry->base.crtc;
@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
120 i++; 145 i++;
121 } 146 }
122 147
148 BUG_ON(i != lds->num_active);
149
150 lds->last_num_active = lds->num_active;
151
123 return 0; 152 return 0;
124} 153}
125 154
@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
130 if (list_empty(&ldu->active)) 159 if (list_empty(&ldu->active))
131 return 0; 160 return 0;
132 161
162 /* Must init otherwise list_empty(&ldu->active) will not work. */
133 list_del_init(&ldu->active); 163 list_del_init(&ldu->active);
134 if (--(ld->num_active) == 0) { 164 if (--(ld->num_active) == 0) {
135 BUG_ON(!ld->fb); 165 BUG_ON(!ld->fb);
@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
149 struct vmw_legacy_display_unit *entry; 179 struct vmw_legacy_display_unit *entry;
150 struct list_head *at; 180 struct list_head *at;
151 181
182 BUG_ON(!ld->num_active && ld->fb);
183 if (vfb != ld->fb) {
184 if (ld->fb && ld->fb->unpin)
185 ld->fb->unpin(ld->fb);
186 if (vfb->pin)
187 vfb->pin(vfb);
188 ld->fb = vfb;
189 }
190
152 if (!list_empty(&ldu->active)) 191 if (!list_empty(&ldu->active))
153 return 0; 192 return 0;
154 193
155 at = &ld->active; 194 at = &ld->active;
156 list_for_each_entry(entry, &ld->active, active) { 195 list_for_each_entry(entry, &ld->active, active) {
157 if (entry->unit > ldu->unit) 196 if (entry->base.unit > ldu->base.unit)
158 break; 197 break;
159 198
160 at = &entry->active; 199 at = &entry->active;
161 } 200 }
162 201
163 list_add(&ldu->active, at); 202 list_add(&ldu->active, at);
164 if (ld->num_active++ == 0) { 203
165 BUG_ON(ld->fb); 204 ld->num_active++;
166 if (vfb->pin)
167 vfb->pin(vfb);
168 ld->fb = vfb;
169 }
170 205
171 return 0; 206 return 0;
172} 207}
@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
208 243
209 /* ldu only supports one fb active at the time */ 244 /* ldu only supports one fb active at the time */
210 if (dev_priv->ldu_priv->fb && vfb && 245 if (dev_priv->ldu_priv->fb && vfb &&
246 !(dev_priv->ldu_priv->num_active == 1 &&
247 !list_empty(&ldu->active)) &&
211 dev_priv->ldu_priv->fb != vfb) { 248 dev_priv->ldu_priv->fb != vfb) {
212 DRM_ERROR("Multiple framebuffers not supported\n"); 249 DRM_ERROR("Multiple framebuffers not supported\n");
213 return -EINVAL; 250 return -EINVAL;
@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
300static enum drm_connector_status 337static enum drm_connector_status
301 vmw_ldu_connector_detect(struct drm_connector *connector) 338 vmw_ldu_connector_detect(struct drm_connector *connector)
302{ 339{
303 /* XXX vmwctrl should control connection status */ 340 if (vmw_connector_to_ldu(connector)->pref_active)
304 if (vmw_connector_to_ldu(connector)->base.unit == 0)
305 return connector_status_connected; 341 return connector_status_connected;
306 return connector_status_disconnected; 342 return connector_status_disconnected;
307} 343}
@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
312 752, 800, 0, 480, 489, 492, 525, 0, 348 752, 800, 0, 480, 489, 492, 525, 0,
313 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 349 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
314 /* 800x600@60Hz */ 350 /* 800x600@60Hz */
315 { DRM_MODE("800x600", 351 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
316 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 352 968, 1056, 0, 600, 601, 605, 628, 0,
317 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 353 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
318 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
319 /* 1024x768@60Hz */ 354 /* 1024x768@60Hz */
320 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 355 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
321 1184, 1344, 0, 768, 771, 777, 806, 0, 356 1184, 1344, 0, 768, 771, 777, 806, 0,
@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
387static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, 422static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
388 uint32_t max_width, uint32_t max_height) 423 uint32_t max_width, uint32_t max_height)
389{ 424{
425 struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
390 struct drm_device *dev = connector->dev; 426 struct drm_device *dev = connector->dev;
391 struct drm_display_mode *mode = NULL; 427 struct drm_display_mode *mode = NULL;
428 struct drm_display_mode prefmode = { DRM_MODE("preferred",
429 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
430 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
431 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
432 };
392 int i; 433 int i;
393 434
435 /* Add preferred mode */
436 {
437 mode = drm_mode_duplicate(dev, &prefmode);
438 if (!mode)
439 return 0;
440 mode->hdisplay = ldu->pref_width;
441 mode->vdisplay = ldu->pref_height;
442 mode->vrefresh = drm_mode_vrefresh(mode);
443 drm_mode_probed_add(connector, mode);
444
445 if (ldu->pref_mode) {
446 list_del_init(&ldu->pref_mode->head);
447 drm_mode_destroy(dev, ldu->pref_mode);
448 }
449
450 ldu->pref_mode = mode;
451 }
452
394 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { 453 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
395 if (vmw_ldu_connector_builtin[i].hdisplay > max_width || 454 if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
396 vmw_ldu_connector_builtin[i].vdisplay > max_height) 455 vmw_ldu_connector_builtin[i].vdisplay > max_height)
@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
443 if (!ldu) 502 if (!ldu)
444 return -ENOMEM; 503 return -ENOMEM;
445 504
446 ldu->unit = unit; 505 ldu->base.unit = unit;
447 crtc = &ldu->base.crtc; 506 crtc = &ldu->base.crtc;
448 encoder = &ldu->base.encoder; 507 encoder = &ldu->base.encoder;
449 connector = &ldu->base.connector; 508 connector = &ldu->base.connector;
450 509
510 INIT_LIST_HEAD(&ldu->active);
511
512 ldu->pref_active = (unit == 0);
513 ldu->pref_width = 800;
514 ldu->pref_height = 600;
515 ldu->pref_mode = NULL;
516
451 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 517 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
452 DRM_MODE_CONNECTOR_LVDS); 518 DRM_MODE_CONNECTOR_LVDS);
453 /* Initial status */ 519 connector->status = vmw_ldu_connector_detect(connector);
454 if (unit == 0)
455 connector->status = connector_status_connected;
456 else
457 connector->status = connector_status_disconnected;
458 520
459 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 521 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
460 DRM_MODE_ENCODER_LVDS); 522 DRM_MODE_ENCODER_LVDS);
@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
462 encoder->possible_crtcs = (1 << unit); 524 encoder->possible_crtcs = (1 << unit);
463 encoder->possible_clones = 0; 525 encoder->possible_clones = 0;
464 526
465 INIT_LIST_HEAD(&ldu->active);
466
467 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); 527 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
468 528
469 drm_connector_attach_property(connector, 529 drm_connector_attach_property(connector,
@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
487 547
488 INIT_LIST_HEAD(&dev_priv->ldu_priv->active); 548 INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
489 dev_priv->ldu_priv->num_active = 0; 549 dev_priv->ldu_priv->num_active = 0;
550 dev_priv->ldu_priv->last_num_active = 0;
490 dev_priv->ldu_priv->fb = NULL; 551 dev_priv->ldu_priv->fb = NULL;
491 552
492 drm_mode_create_dirty_info_property(dev_priv->dev); 553 drm_mode_create_dirty_info_property(dev_priv->dev);
493 554
494 vmw_ldu_init(dev_priv, 0); 555 vmw_ldu_init(dev_priv, 0);
495 vmw_ldu_init(dev_priv, 1); 556 /* for old hardware without multimon only enable one display */
496 vmw_ldu_init(dev_priv, 2); 557 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
497 vmw_ldu_init(dev_priv, 3); 558 vmw_ldu_init(dev_priv, 1);
498 vmw_ldu_init(dev_priv, 4); 559 vmw_ldu_init(dev_priv, 2);
499 vmw_ldu_init(dev_priv, 5); 560 vmw_ldu_init(dev_priv, 3);
500 vmw_ldu_init(dev_priv, 6); 561 vmw_ldu_init(dev_priv, 4);
501 vmw_ldu_init(dev_priv, 7); 562 vmw_ldu_init(dev_priv, 5);
563 vmw_ldu_init(dev_priv, 6);
564 vmw_ldu_init(dev_priv, 7);
565 }
502 566
503 return 0; 567 return 0;
504} 568}
@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
514 578
515 return 0; 579 return 0;
516} 580}
581
582int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
583 struct drm_vmw_rect *rects)
584{
585 struct drm_device *dev = dev_priv->dev;
586 struct vmw_legacy_display_unit *ldu;
587 struct drm_connector *con;
588 int i;
589
590 mutex_lock(&dev->mode_config.mutex);
591
592#if 0
593 DRM_INFO("%s: new layout ", __func__);
594 for (i = 0; i < (int)num; i++)
595 DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
596 rects[i].w, rects[i].h);
597 DRM_INFO("\n");
598#else
599 (void)i;
600#endif
601
602 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
603 ldu = vmw_connector_to_ldu(con);
604 if (num > ldu->base.unit) {
605 ldu->pref_width = rects[ldu->base.unit].w;
606 ldu->pref_height = rects[ldu->base.unit].h;
607 ldu->pref_active = true;
608 } else {
609 ldu->pref_width = 800;
610 ldu->pref_height = 600;
611 ldu->pref_active = false;
612 }
613 con->status = vmw_ldu_connector_detect(con);
614 }
615
616 mutex_unlock(&dev->mode_config.mutex);
617
618 return 0;
619}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ad566c85b075..df2036ed18d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
358 if (stream->buf != buf) 358 if (stream->buf != buf)
359 stream->buf = vmw_dmabuf_reference(buf); 359 stream->buf = vmw_dmabuf_reference(buf);
360 stream->saved = *arg; 360 stream->saved = *arg;
361 /* stream is no longer stopped/paused */
362 stream->paused = false;
361 363
362 return 0; 364 return 0;
363} 365}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 441e38c95a85..b87569e96b16 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1,12 +1,32 @@
1/* 1/*
2 * vgaarb.c 2 * vgaarb.c: Implements the VGA arbitration. For details refer to
3 * Documentation/vgaarbiter.txt
4 *
3 * 5 *
4 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> 6 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> 7 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
6 * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> 8 * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
7 * 9 *
8 * Implements the VGA arbitration. For details refer to 10 * Permission is hereby granted, free of charge, to any person obtaining a
9 * Documentation/vgaarbiter.txt 11 * copy of this software and associated documentation files (the "Software"),
12 * to deal in the Software without restriction, including without limitation
13 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14 * and/or sell copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the next
18 * paragraph) shall be included in all copies or substantial portions of the
19 * Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 * DEALINGS
28 * IN THE SOFTWARE.
29 *
10 */ 30 */
11 31
12#include <linux/module.h> 32#include <linux/module.h>
@@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
155 (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) 175 (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
156 rsrc |= VGA_RSRC_LEGACY_MEM; 176 rsrc |= VGA_RSRC_LEGACY_MEM;
157 177
158 pr_devel("%s: %d\n", __func__, rsrc); 178 pr_debug("%s: %d\n", __func__, rsrc);
159 pr_devel("%s: owns: %d\n", __func__, vgadev->owns); 179 pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
160 180
161 /* Check what resources we need to acquire */ 181 /* Check what resources we need to acquire */
162 wants = rsrc & ~vgadev->owns; 182 wants = rsrc & ~vgadev->owns;
@@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
268{ 288{
269 unsigned int old_locks = vgadev->locks; 289 unsigned int old_locks = vgadev->locks;
270 290
271 pr_devel("%s\n", __func__); 291 pr_debug("%s\n", __func__);
272 292
273 /* Update our counters, and account for equivalent legacy resources 293 /* Update our counters, and account for equivalent legacy resources
274 * if we decode them 294 * if we decode them
@@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
575 else 595 else
576 vga_decode_count--; 596 vga_decode_count--;
577 } 597 }
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
578} 599}
579 600
580void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
@@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
831 curr_pos += 5; 852 curr_pos += 5;
832 remaining -= 5; 853 remaining -= 5;
833 854
834 pr_devel("client 0x%p called 'lock'\n", priv); 855 pr_debug("client 0x%p called 'lock'\n", priv);
835 856
836 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 857 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
837 ret_val = -EPROTO; 858 ret_val = -EPROTO;
@@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
867 curr_pos += 7; 888 curr_pos += 7;
868 remaining -= 7; 889 remaining -= 7;
869 890
870 pr_devel("client 0x%p called 'unlock'\n", priv); 891 pr_debug("client 0x%p called 'unlock'\n", priv);
871 892
872 if (strncmp(curr_pos, "all", 3) == 0) 893 if (strncmp(curr_pos, "all", 3) == 0)
873 io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; 894 io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
@@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
917 curr_pos += 8; 938 curr_pos += 8;
918 remaining -= 8; 939 remaining -= 8;
919 940
920 pr_devel("client 0x%p called 'trylock'\n", priv); 941 pr_debug("client 0x%p called 'trylock'\n", priv);
921 942
922 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 943 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
923 ret_val = -EPROTO; 944 ret_val = -EPROTO;
@@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 982
962 curr_pos += 7; 983 curr_pos += 7;
963 remaining -= 7; 984 remaining -= 7;
964 pr_devel("client 0x%p called 'target'\n", priv); 985 pr_debug("client 0x%p called 'target'\n", priv);
965 /* if target is default */ 986 /* if target is default */
966 if (!strncmp(curr_pos, "default", 7)) 987 if (!strncmp(curr_pos, "default", 7))
967 pdev = pci_dev_get(vga_default_device()); 988 pdev = pci_dev_get(vga_default_device());
@@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
971 ret_val = -EPROTO; 992 ret_val = -EPROTO;
972 goto done; 993 goto done;
973 } 994 }
974 pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, 995 pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
975 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 996 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
976 997
977 pbus = pci_find_bus(domain, bus); 998 pbus = pci_find_bus(domain, bus);
978 pr_devel("vgaarb: pbus %p\n", pbus); 999 pr_debug("vgaarb: pbus %p\n", pbus);
979 if (pbus == NULL) { 1000 if (pbus == NULL) {
980 pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", 1001 pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
981 domain, bus); 1002 domain, bus);
@@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
983 goto done; 1004 goto done;
984 } 1005 }
985 pdev = pci_get_slot(pbus, devfn); 1006 pdev = pci_get_slot(pbus, devfn);
986 pr_devel("vgaarb: pdev %p\n", pdev); 1007 pr_debug("vgaarb: pdev %p\n", pdev);
987 if (!pdev) { 1008 if (!pdev) {
988 pr_err("vgaarb: invalid PCI address %x:%x\n", 1009 pr_err("vgaarb: invalid PCI address %x:%x\n",
989 bus, devfn); 1010 bus, devfn);
@@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
993 } 1014 }
994 1015
995 vgadev = vgadev_find(pdev); 1016 vgadev = vgadev_find(pdev);
996 pr_devel("vgaarb: vgadev %p\n", vgadev); 1017 pr_debug("vgaarb: vgadev %p\n", vgadev);
997 if (vgadev == NULL) { 1018 if (vgadev == NULL) {
998 pr_err("vgaarb: this pci device is not a vga device\n"); 1019 pr_err("vgaarb: this pci device is not a vga device\n");
999 pci_dev_put(pdev); 1020 pci_dev_put(pdev);
@@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
1029 } else if (strncmp(curr_pos, "decodes ", 8) == 0) { 1050 } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
1030 curr_pos += 8; 1051 curr_pos += 8;
1031 remaining -= 8; 1052 remaining -= 8;
1032 pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); 1053 pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
1033 1054
1034 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 1055 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1035 ret_val = -EPROTO; 1056 ret_val = -EPROTO;
@@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
1058{ 1079{
1059 struct vga_arb_private *priv = file->private_data; 1080 struct vga_arb_private *priv = file->private_data;
1060 1081
1061 pr_devel("%s\n", __func__); 1082 pr_debug("%s\n", __func__);
1062 1083
1063 if (priv == NULL) 1084 if (priv == NULL)
1064 return -ENODEV; 1085 return -ENODEV;
@@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file)
1071 struct vga_arb_private *priv; 1092 struct vga_arb_private *priv;
1072 unsigned long flags; 1093 unsigned long flags;
1073 1094
1074 pr_devel("%s\n", __func__); 1095 pr_debug("%s\n", __func__);
1075 1096
1076 priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); 1097 priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
1077 if (priv == NULL) 1098 if (priv == NULL)
@@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
1101 unsigned long flags; 1122 unsigned long flags;
1102 int i; 1123 int i;
1103 1124
1104 pr_devel("%s\n", __func__); 1125 pr_debug("%s\n", __func__);
1105 1126
1106 if (priv == NULL) 1127 if (priv == NULL)
1107 return -ENODEV; 1128 return -ENODEV;
@@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
1112 uc = &priv->cards[i]; 1133 uc = &priv->cards[i];
1113 if (uc->pdev == NULL) 1134 if (uc->pdev == NULL)
1114 continue; 1135 continue;
1115 pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", 1136 pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
1116 uc->io_cnt, uc->mem_cnt); 1137 uc->io_cnt, uc->mem_cnt);
1117 while (uc->io_cnt--) 1138 while (uc->io_cnt--)
1118 vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); 1139 vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
@@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
1165 struct pci_dev *pdev = to_pci_dev(dev); 1186 struct pci_dev *pdev = to_pci_dev(dev);
1166 bool notify = false; 1187 bool notify = false;
1167 1188
1168 pr_devel("%s\n", __func__); 1189 pr_debug("%s\n", __func__);
1169 1190
1170 /* For now we're only intereted in devices added and removed. I didn't 1191 /* For now we're only intereted in devices added and removed. I didn't
1171 * test this thing here, so someone needs to double check for the 1192 * test this thing here, so someone needs to double check for the