Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 30
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 44
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 1
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 3
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 462
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 81
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 64
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 95
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.h | 48
-rw-r--r--  drivers/gpu/drm/i830/i830_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 64
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 99
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 4
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 187
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 9
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 47
-rw-r--r--  drivers/gpu/drm/mga/mga_warp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 52
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 122
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c | 4
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 121
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 1
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 172
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 1115
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 82
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 6
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 14
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 6
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 120
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 71
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.h | 8
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 22
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 13
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 7
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c | 47
-rw-r--r--  drivers/gpu/drm/via/via_verifier.h | 4
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1
82 files changed, 2422 insertions, 1646 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5b7a1a4692a0..4cab0c6397e3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -61,6 +61,7 @@ config DRM_RADEON
 	select DRM_KMS_HELPER
 	select DRM_TTM
 	select POWER_SUPPLY
+	select HWMON
 	help
 	  Choose this option if you have an ATI Radeon graphics card. There
 	  are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 57cea01c4ffb..4c68f76993d8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -80,6 +80,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
 {
 	{ DRM_MODE_DITHERING_OFF, "Off" },
 	{ DRM_MODE_DITHERING_ON, "On" },
+	{ DRM_MODE_DITHERING_AUTO, "Automatic" },
 };
 
 /*
@@ -1126,7 +1127,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 				    head) {
-			DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
+			DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 			if (put_user(crtc->base.id, crtc_id + copied)) {
 				ret = -EFAULT;
 				goto out;
@@ -1154,8 +1155,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 		list_for_each_entry(encoder,
 				    &dev->mode_config.encoder_list,
 				    head) {
-			DRM_DEBUG_KMS("ENCODER ID is %d\n",
-				      encoder->base.id);
+			DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+				      drm_get_encoder_name(encoder));
 			if (put_user(encoder->base.id, encoder_id +
 				     copied)) {
 				ret = -EFAULT;
@@ -1185,8 +1186,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 		list_for_each_entry(connector,
 				    &dev->mode_config.connector_list,
 				    head) {
-			DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
-				      connector->base.id);
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+				      connector->base.id,
+				      drm_get_connector_name(connector));
 			if (put_user(connector->base.id,
 				     connector_id + copied)) {
 				ret = -EFAULT;
@@ -1209,7 +1211,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	}
 	card_res->count_connectors = connector_count;
 
-	DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
+	DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
 		  card_res->count_connectors, card_res->count_encoders);
 
 out:
@@ -1312,7 +1314,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
 
-	DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
+	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
 	mutex_lock(&dev->mode_config.mutex);
 
@@ -1493,6 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
 		/* If we have a mode we need a framebuffer. */
@@ -1569,6 +1572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			goto out;
 		}
 		connector = obj_to_connector(obj);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector));
 
 		connector_set[i] = connector;
 	}
@@ -1684,6 +1690,7 @@ int drm_mode_addfb(struct drm_device *dev,
 
 	r->fb_id = fb->base.id;
 	list_add(&fb->filp_head, &file_priv->fbs);
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
 
 out:
 	mutex_unlock(&dev->mode_config.mutex);
@@ -2610,6 +2617,15 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 	crtc = obj_to_crtc(obj);
 
+	if (crtc->fb == NULL) {
+		/* The framebuffer is currently unbound, presumably
+		 * due to a hotplug event, that userspace has not
+		 * yet discovered.
+		 */
+		ret = -EBUSY;
+		goto out;
+	}
+
 	if (crtc->funcs->page_flip == NULL)
 		goto out;
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 774d21e4dcdd..11fe9c870d17 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -86,7 +86,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	int count = 0;
 	int mode_flags = 0;
 
-	DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+			drm_get_connector_name(connector));
 	/* set all modes to the unverified state */
 	list_for_each_entry_safe(mode, t, &connector->modes, head)
 		mode->status = MODE_UNVERIFIED;
@@ -102,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	connector->status = connector->funcs->detect(connector);
 
 	if (connector->status == connector_status_disconnected) {
-		DRM_DEBUG_KMS("%s is disconnected\n",
-			  drm_get_connector_name(connector));
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+			connector->base.id, drm_get_connector_name(connector));
 		drm_mode_connector_update_edid_property(connector, NULL);
 		goto prune;
 	}
@@ -141,8 +142,8 @@ prune:
 
 	drm_mode_sort(&connector->modes);
 
-	DRM_DEBUG_KMS("Probed modes for %s\n",
-		      drm_get_connector_name(connector));
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+			drm_get_connector_name(connector));
 	list_for_each_entry_safe(mode, t, &connector->modes, head) {
 		mode->vrefresh = drm_mode_vrefresh(mode);
 
@@ -374,6 +375,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
 		goto done;
 	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	/* Prepare the encoders and CRTCs before setting the mode. */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -401,8 +403,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 		if (encoder->crtc != crtc)
 			continue;
 
-		DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
-			  mode->name, mode->base.id);
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+			encoder->base.id, drm_get_encoder_name(encoder),
+			mode->base.id, mode->name);
 		encoder_funcs = encoder->helper_private;
 		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
 	}
@@ -478,10 +481,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
 	crtc_funcs = set->crtc->helper_private;
 
-	DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
-			" %d (x, y) (%i, %i)\n",
-		       set->crtc, set->crtc->base.id, set->fb, set->connectors,
-		       (int)set->num_connectors, set->x, set->y);
+	if (set->fb) {
+		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+				set->crtc->base.id, set->fb->base.id,
+				(int)set->num_connectors, set->x, set->y);
+	} else {
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
+				set->crtc->base.id, (int)set->num_connectors,
+				set->x, set->y);
+	}
 
 	dev = set->crtc->dev;
 
@@ -610,8 +618,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 			connector->encoder->crtc = new_crtc;
 		}
-		DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
-			      connector->base.id, new_crtc);
+		if (new_crtc) {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+				connector->base.id, drm_get_connector_name(connector),
+				new_crtc->base.id);
+		} else {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+				connector->base.id, drm_get_connector_name(connector));
+		}
 	}
 
 	/* mode_set_base is not a required function */
@@ -629,8 +643,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 					      set->x, set->y,
 					      old_fb)) {
-			DRM_ERROR("failed to set mode on crtc %p\n",
-				  set->crtc);
+			DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+					set->crtc->base.id);
 			ret = -EINVAL;
 			goto fail;
 		}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 510bc87d98f6..b5a51686f492 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -335,6 +335,7 @@ static void __exit drm_core_exit(void)
 
 	unregister_chrdev(DRM_MAJOR, "drm");
 
+	idr_remove_all(&drm_minors_idr);
 	idr_destroy(&drm_minors_idr);
 }
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8601b72b6f26..4f1b86714489 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -429,6 +429,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, NULL);
 
+	idr_remove_all(&file_private->object_idr);
 	idr_destroy(&file_private->object_idr);
 }
 
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 63575e2fa882..d1ad57450df1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -156,6 +156,9 @@ static void drm_master_destroy(struct kref *kref)
 		master->unique_len = 0;
 	}
 
+	kfree(dev->devname);
+	dev->devname = NULL;
+
 	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
 		list_del(&pt->head);
 		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 6d2abaf35ba2..92862563e7ee 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -2,3 +2,6 @@ ccflags-y := -Iinclude/drm
 
 ch7006-y := ch7006_drv.o ch7006_mode.o
 obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+
+sil164-y := sil164_drv.o
+obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
new file mode 100644
index 000000000000..0b6773290c08
--- /dev/null
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -0,0 +1,462 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "drm_encoder_slave.h"
30#include "i2c/sil164.h"
31
32struct sil164_priv {
33 struct sil164_encoder_params config;
34 struct i2c_client *duallink_slave;
35
36 uint8_t saved_state[0x10];
37 uint8_t saved_slave_state[0x10];
38};
39
40#define to_sil164_priv(x) \
41 ((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
42
43#define sil164_dbg(client, format, ...) do { \
44 if (drm_debug & DRM_UT_KMS) \
45 dev_printk(KERN_DEBUG, &client->dev, \
46 "%s: " format, __func__, ## __VA_ARGS__); \
47 } while (0)
48#define sil164_info(client, format, ...) \
49 dev_info(&client->dev, format, __VA_ARGS__)
50#define sil164_err(client, format, ...) \
51 dev_err(&client->dev, format, __VA_ARGS__)
52
53#define SIL164_I2C_ADDR_MASTER 0x38
54#define SIL164_I2C_ADDR_SLAVE 0x39
55
56/* HW register definitions */
57
58#define SIL164_VENDOR_LO 0x0
59#define SIL164_VENDOR_HI 0x1
60#define SIL164_DEVICE_LO 0x2
61#define SIL164_DEVICE_HI 0x3
62#define SIL164_REVISION 0x4
63#define SIL164_FREQ_MIN 0x6
64#define SIL164_FREQ_MAX 0x7
65#define SIL164_CONTROL0 0x8
66# define SIL164_CONTROL0_POWER_ON 0x01
67# define SIL164_CONTROL0_EDGE_RISING 0x02
68# define SIL164_CONTROL0_INPUT_24BIT 0x04
69# define SIL164_CONTROL0_DUAL_EDGE 0x08
70# define SIL164_CONTROL0_HSYNC_ON 0x10
71# define SIL164_CONTROL0_VSYNC_ON 0x20
72#define SIL164_DETECT 0x9
73# define SIL164_DETECT_INTR_STAT 0x01
74# define SIL164_DETECT_HOTPLUG_STAT 0x02
75# define SIL164_DETECT_RECEIVER_STAT 0x04
76# define SIL164_DETECT_INTR_MODE_RECEIVER 0x00
77# define SIL164_DETECT_INTR_MODE_HOTPLUG 0x08
78# define SIL164_DETECT_OUT_MODE_HIGH 0x00
79# define SIL164_DETECT_OUT_MODE_INTR 0x10
80# define SIL164_DETECT_OUT_MODE_RECEIVER 0x20
81# define SIL164_DETECT_OUT_MODE_HOTPLUG 0x30
82# define SIL164_DETECT_VSWING_STAT 0x80
83#define SIL164_CONTROL1 0xa
84# define SIL164_CONTROL1_DESKEW_ENABLE 0x10
85# define SIL164_CONTROL1_DESKEW_INCR_SHIFT 5
86#define SIL164_GPIO 0xb
87#define SIL164_CONTROL2 0xc
88# define SIL164_CONTROL2_FILTER_ENABLE 0x01
89# define SIL164_CONTROL2_FILTER_SETTING_SHIFT 1
90# define SIL164_CONTROL2_DUALLINK_MASTER 0x40
91# define SIL164_CONTROL2_SYNC_CONT 0x80
92#define SIL164_DUALLINK 0xd
93# define SIL164_DUALLINK_ENABLE 0x10
94# define SIL164_DUALLINK_SKEW_SHIFT 5
95#define SIL164_PLLZONE 0xe
96# define SIL164_PLLZONE_STAT 0x08
97# define SIL164_PLLZONE_FORCE_ON 0x10
98# define SIL164_PLLZONE_FORCE_HIGH 0x20
99
100/* HW access functions */
101
102static void
103sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val)
104{
105 uint8_t buf[] = {addr, val};
106 int ret;
107
108 ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
109 if (ret < 0)
110 sil164_err(client, "Error %d writing to subaddress 0x%x\n",
111 ret, addr);
112}
113
114static uint8_t
115sil164_read(struct i2c_client *client, uint8_t addr)
116{
117 uint8_t val;
118 int ret;
119
120 ret = i2c_master_send(client, &addr, sizeof(addr));
121 if (ret < 0)
122 goto fail;
123
124 ret = i2c_master_recv(client, &val, sizeof(val));
125 if (ret < 0)
126 goto fail;
127
128 return val;
129
130fail:
131 sil164_err(client, "Error %d reading from subaddress 0x%x\n",
132 ret, addr);
133 return 0;
134}
135
136static void
137sil164_save_state(struct i2c_client *client, uint8_t *state)
138{
139 int i;
140
141 for (i = 0x8; i <= 0xe; i++)
142 state[i] = sil164_read(client, i);
143}
144
145static void
146sil164_restore_state(struct i2c_client *client, uint8_t *state)
147{
148 int i;
149
150 for (i = 0x8; i <= 0xe; i++)
151 sil164_write(client, i, state[i]);
152}
153
154static void
155sil164_set_power_state(struct i2c_client *client, bool on)
156{
157 uint8_t control0 = sil164_read(client, SIL164_CONTROL0);
158
159 if (on)
160 control0 |= SIL164_CONTROL0_POWER_ON;
161 else
162 control0 &= ~SIL164_CONTROL0_POWER_ON;
163
164 sil164_write(client, SIL164_CONTROL0, control0);
165}
166
167static void
168sil164_init_state(struct i2c_client *client,
169 struct sil164_encoder_params *config,
170 bool duallink)
171{
172 sil164_write(client, SIL164_CONTROL0,
173 SIL164_CONTROL0_HSYNC_ON |
174 SIL164_CONTROL0_VSYNC_ON |
175 (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
176 (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
177 (config->input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0));
178
179 sil164_write(client, SIL164_DETECT,
180 SIL164_DETECT_INTR_STAT |
181 SIL164_DETECT_OUT_MODE_RECEIVER);
182
183 sil164_write(client, SIL164_CONTROL1,
184 (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) |
185 (((config->input_skew + 4) & 0x7)
186 << SIL164_CONTROL1_DESKEW_INCR_SHIFT));
187
188 sil164_write(client, SIL164_CONTROL2,
189 SIL164_CONTROL2_SYNC_CONT |
190 (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) |
191 (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT));
192
193 sil164_write(client, SIL164_PLLZONE, 0);
194
195 if (duallink)
196 sil164_write(client, SIL164_DUALLINK,
197 SIL164_DUALLINK_ENABLE |
198 (((config->duallink_skew + 4) & 0x7)
199 << SIL164_DUALLINK_SKEW_SHIFT));
200 else
201 sil164_write(client, SIL164_DUALLINK, 0);
202}
203
204/* DRM encoder functions */
205
206static void
207sil164_encoder_set_config(struct drm_encoder *encoder, void *params)
208{
209 struct sil164_priv *priv = to_sil164_priv(encoder);
210
211 priv->config = *(struct sil164_encoder_params *)params;
212}
213
214static void
215sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
216{
217 struct sil164_priv *priv = to_sil164_priv(encoder);
218 bool on = (mode == DRM_MODE_DPMS_ON);
219 bool duallink = (on && encoder->crtc->mode.clock > 165000);
220
221 sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
222
223 if (priv->duallink_slave)
224 sil164_set_power_state(priv->duallink_slave, duallink);
225}
226
227static void
228sil164_encoder_save(struct drm_encoder *encoder)
229{
230 struct sil164_priv *priv = to_sil164_priv(encoder);
231
232 sil164_save_state(drm_i2c_encoder_get_client(encoder),
233 priv->saved_state);
234
235 if (priv->duallink_slave)
236 sil164_save_state(priv->duallink_slave,
237 priv->saved_slave_state);
238}
239
240static void
241sil164_encoder_restore(struct drm_encoder *encoder)
242{
243 struct sil164_priv *priv = to_sil164_priv(encoder);
244
245 sil164_restore_state(drm_i2c_encoder_get_client(encoder),
246 priv->saved_state);
247
248 if (priv->duallink_slave)
249 sil164_restore_state(priv->duallink_slave,
250 priv->saved_slave_state);
251}
252
253static bool
254sil164_encoder_mode_fixup(struct drm_encoder *encoder,
255 struct drm_display_mode *mode,
256 struct drm_display_mode *adjusted_mode)
257{
258 return true;
259}
260
261static int
262sil164_encoder_mode_valid(struct drm_encoder *encoder,
263 struct drm_display_mode *mode)
264{
265 struct sil164_priv *priv = to_sil164_priv(encoder);
266
267 if (mode->clock < 32000)
268 return MODE_CLOCK_LOW;
269
270 if (mode->clock > 330000 ||
271 (mode->clock > 165000 && !priv->duallink_slave))
272 return MODE_CLOCK_HIGH;
273
274 return MODE_OK;
275}
276
277static void
278sil164_encoder_mode_set(struct drm_encoder *encoder,
279 struct drm_display_mode *mode,
280 struct drm_display_mode *adjusted_mode)
281{
282 struct sil164_priv *priv = to_sil164_priv(encoder);
283 bool duallink = adjusted_mode->clock > 165000;
284
285 sil164_init_state(drm_i2c_encoder_get_client(encoder),
286 &priv->config, duallink);
287
288 if (priv->duallink_slave)
289 sil164_init_state(priv->duallink_slave,
290 &priv->config, duallink);
291
292 sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
293}
294
295static enum drm_connector_status
296sil164_encoder_detect(struct drm_encoder *encoder,
297 struct drm_connector *connector)
298{
299 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
300
301 if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
302 return connector_status_connected;
303 else
304 return connector_status_disconnected;
305}
306
307static int
308sil164_encoder_get_modes(struct drm_encoder *encoder,
309 struct drm_connector *connector)
310{
311 return 0;
312}
313
314static int
315sil164_encoder_create_resources(struct drm_encoder *encoder,
316 struct drm_connector *connector)
317{
318 return 0;
319}
320
321static int
322sil164_encoder_set_property(struct drm_encoder *encoder,
323 struct drm_connector *connector,
324 struct drm_property *property,
325 uint64_t val)
326{
327 return 0;
328}
329
330static void
331sil164_encoder_destroy(struct drm_encoder *encoder)
332{
333 struct sil164_priv *priv = to_sil164_priv(encoder);
334
335 if (priv->duallink_slave)
336 i2c_unregister_device(priv->duallink_slave);
337
338 kfree(priv);
339 drm_i2c_encoder_destroy(encoder);
340}
341
342static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
343 .set_config = sil164_encoder_set_config,
344 .destroy = sil164_encoder_destroy,
345 .dpms = sil164_encoder_dpms,
346 .save = sil164_encoder_save,
347 .restore = sil164_encoder_restore,
348 .mode_fixup = sil164_encoder_mode_fixup,
349 .mode_valid = sil164_encoder_mode_valid,
350 .mode_set = sil164_encoder_mode_set,
351 .detect = sil164_encoder_detect,
352 .get_modes = sil164_encoder_get_modes,
353 .create_resources = sil164_encoder_create_resources,
354 .set_property = sil164_encoder_set_property,
355};
356
357/* I2C driver functions */
358
359static int
360sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
361{
362 int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
363 sil164_read(client, SIL164_VENDOR_LO);
364 int device = sil164_read(client, SIL164_DEVICE_HI) << 8 |
365 sil164_read(client, SIL164_DEVICE_LO);
366 int rev = sil164_read(client, SIL164_REVISION);
367
368 if (vendor != 0x1 || device != 0x6) {
369 sil164_dbg(client, "Unknown device %x:%x.%x\n",
370 vendor, device, rev);
371 return -ENODEV;
372 }
373
374 sil164_info(client, "Detected device %x:%x.%x\n",
375 vendor, device, rev);
376
377 return 0;
378}
379
380static int
381sil164_remove(struct i2c_client *client)
382{
383 return 0;
384}
385
386static struct i2c_client *
387sil164_detect_slave(struct i2c_client *client)
388{
389 struct i2c_adapter *adap = client->adapter;
390 struct i2c_msg msg = {
391 .addr = SIL164_I2C_ADDR_SLAVE,
392 .len = 0,
393 };
394 const struct i2c_board_info info = {
395 I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE)
396 };
397
398 if (i2c_transfer(adap, &msg, 1) != 1) {
399 sil164_dbg(adap, "No dual-link slave found.");
400 return NULL;
401 }
402
403 return i2c_new_device(adap, &info);
404}
405
406static int
407sil164_encoder_init(struct i2c_client *client,
408 struct drm_device *dev,
409 struct drm_encoder_slave *encoder)
410{
411 struct sil164_priv *priv;
412
413 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
414 if (!priv)
415 return -ENOMEM;
416
417 encoder->slave_priv = priv;
418 encoder->slave_funcs = &sil164_encoder_funcs;
419
420 priv->duallink_slave = sil164_detect_slave(client);
421
422 return 0;
423}
424
425static struct i2c_device_id sil164_ids[] = {
426 { "sil164", 0 },
427 { }
428};
429MODULE_DEVICE_TABLE(i2c, sil164_ids);
430
431static struct drm_i2c_encoder_driver sil164_driver = {
432 .i2c_driver = {
433 .probe = sil164_probe,
434 .remove = sil164_remove,
435 .driver = {
436 .name = "sil164",
437 },
438 .id_table = sil164_ids,
439 },
440 .encoder_init = sil164_encoder_init,
441};
442
443/* Module initialization */
444
445static int __init
446sil164_init(void)
447{
448 return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
449}
450
451static void __exit
452sil164_exit(void)
453{
454 drm_i2c_encoder_unregister(&sil164_driver);
455}
456
457MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
458MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver");
459MODULE_LICENSE("GPL and additional rights");
460
461module_init(sil164_init);
462module_exit(sil164_exit);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 997d91707ad2..09c86ed89927 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -60,9 +60,8 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
60 /* In use is already a pointer */ 60 /* In use is already a pointer */
61 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, 61 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
62 I810_BUF_CLIENT); 62 I810_BUF_CLIENT);
63 if (used == I810_BUF_FREE) { 63 if (used == I810_BUF_FREE)
64 return buf; 64 return buf;
65 }
66 } 65 }
67 return NULL; 66 return NULL;
68} 67}
@@ -71,7 +70,7 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
71 * yet, the hardware updates in use for us once its on the ring buffer. 70 * yet, the hardware updates in use for us once its on the ring buffer.
72 */ 71 */
73 72
74static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) 73static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
75{ 74{
76 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 75 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
77 int used; 76 int used;
@@ -121,7 +120,7 @@ static const struct file_operations i810_buffer_fops = {
121 .fasync = drm_fasync, 120 .fasync = drm_fasync,
122}; 121};
123 122
124static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) 123static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
125{ 124{
126 struct drm_device *dev = file_priv->minor->dev; 125 struct drm_device *dev = file_priv->minor->dev;
127 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 126 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -152,7 +151,7 @@ static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
152 return retcode; 151 return retcode;
153} 152}
154 153
155static int i810_unmap_buffer(struct drm_buf * buf) 154static int i810_unmap_buffer(struct drm_buf *buf)
156{ 155{
157 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 156 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
158 int retcode = 0; 157 int retcode = 0;
@@ -172,7 +171,7 @@ static int i810_unmap_buffer(struct drm_buf * buf)
172 return retcode; 171 return retcode;
173} 172}
174 173
175static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, 174static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
176 struct drm_file *file_priv) 175 struct drm_file *file_priv)
177{ 176{
178 struct drm_buf *buf; 177 struct drm_buf *buf;
@@ -202,7 +201,7 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
202 return retcode; 201 return retcode;
203} 202}
204 203
205static int i810_dma_cleanup(struct drm_device * dev) 204static int i810_dma_cleanup(struct drm_device *dev)
206{ 205{
207 struct drm_device_dma *dma = dev->dma; 206 struct drm_device_dma *dma = dev->dma;
208 207
@@ -218,9 +217,8 @@ static int i810_dma_cleanup(struct drm_device * dev)
218 drm_i810_private_t *dev_priv = 217 drm_i810_private_t *dev_priv =
219 (drm_i810_private_t *) dev->dev_private; 218 (drm_i810_private_t *) dev->dev_private;
220 219
221 if (dev_priv->ring.virtual_start) { 220 if (dev_priv->ring.virtual_start)
222 drm_core_ioremapfree(&dev_priv->ring.map, dev); 221 drm_core_ioremapfree(&dev_priv->ring.map, dev);
223 }
224 if (dev_priv->hw_status_page) { 222 if (dev_priv->hw_status_page) {
225 pci_free_consistent(dev->pdev, PAGE_SIZE, 223 pci_free_consistent(dev->pdev, PAGE_SIZE,
226 dev_priv->hw_status_page, 224 dev_priv->hw_status_page,
@@ -242,7 +240,7 @@ static int i810_dma_cleanup(struct drm_device * dev)
242 return 0; 240 return 0;
243} 241}
244 242
245static int i810_wait_ring(struct drm_device * dev, int n) 243static int i810_wait_ring(struct drm_device *dev, int n)
246{ 244{
247 drm_i810_private_t *dev_priv = dev->dev_private; 245 drm_i810_private_t *dev_priv = dev->dev_private;
248 drm_i810_ring_buffer_t *ring = &(dev_priv->ring); 246 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -271,11 +269,11 @@ static int i810_wait_ring(struct drm_device * dev, int n)
271 udelay(1); 269 udelay(1);
272 } 270 }
273 271
274 out_wait_ring: 272out_wait_ring:
275 return iters; 273 return iters;
276} 274}
277 275
278static void i810_kernel_lost_context(struct drm_device * dev) 276static void i810_kernel_lost_context(struct drm_device *dev)
279{ 277{
280 drm_i810_private_t *dev_priv = dev->dev_private; 278 drm_i810_private_t *dev_priv = dev->dev_private;
281 drm_i810_ring_buffer_t *ring = &(dev_priv->ring); 279 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -287,7 +285,7 @@ static void i810_kernel_lost_context(struct drm_device * dev)
287 ring->space += ring->Size; 285 ring->space += ring->Size;
288} 286}
289 287
290static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) 288static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
291{ 289{
292 struct drm_device_dma *dma = dev->dma; 290 struct drm_device_dma *dma = dev->dma;
293 int my_idx = 24; 291 int my_idx = 24;
@@ -322,9 +320,9 @@ static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_
322 return 0; 320 return 0;
323} 321}
324 322
325static int i810_dma_initialize(struct drm_device * dev, 323static int i810_dma_initialize(struct drm_device *dev,
326 drm_i810_private_t * dev_priv, 324 drm_i810_private_t *dev_priv,
327 drm_i810_init_t * init) 325 drm_i810_init_t *init)
328{ 326{
329 struct drm_map_list *r_list; 327 struct drm_map_list *r_list;
330 memset(dev_priv, 0, sizeof(drm_i810_private_t)); 328 memset(dev_priv, 0, sizeof(drm_i810_private_t));
@@ -462,7 +460,7 @@ static int i810_dma_init(struct drm_device *dev, void *data,
462 * Use 'volatile' & local var tmp to force the emitted values to be 460 * Use 'volatile' & local var tmp to force the emitted values to be
463 * identical to the verified ones. 461 * identical to the verified ones.
464 */ 462 */
465static void i810EmitContextVerified(struct drm_device * dev, 463static void i810EmitContextVerified(struct drm_device *dev,
466 volatile unsigned int *code) 464 volatile unsigned int *code)
467{ 465{
468 drm_i810_private_t *dev_priv = dev->dev_private; 466 drm_i810_private_t *dev_priv = dev->dev_private;
@@ -495,7 +493,7 @@ static void i810EmitContextVerified(struct drm_device * dev,
495 ADVANCE_LP_RING(); 493 ADVANCE_LP_RING();
496} 494}
497 495
498static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) 496static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
499{ 497{
500 drm_i810_private_t *dev_priv = dev->dev_private; 498 drm_i810_private_t *dev_priv = dev->dev_private;
501 int i, j = 0; 499 int i, j = 0;
@@ -528,7 +526,7 @@ static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *
528 526
529/* Need to do some additional checking when setting the dest buffer. 527/* Need to do some additional checking when setting the dest buffer.
530 */ 528 */
531static void i810EmitDestVerified(struct drm_device * dev, 529static void i810EmitDestVerified(struct drm_device *dev,
532 volatile unsigned int *code) 530 volatile unsigned int *code)
533{ 531{
534 drm_i810_private_t *dev_priv = dev->dev_private; 532 drm_i810_private_t *dev_priv = dev->dev_private;
@@ -563,7 +561,7 @@ static void i810EmitDestVerified(struct drm_device * dev,
563 ADVANCE_LP_RING(); 561 ADVANCE_LP_RING();
564} 562}
565 563
566static void i810EmitState(struct drm_device * dev) 564static void i810EmitState(struct drm_device *dev)
567{ 565{
568 drm_i810_private_t *dev_priv = dev->dev_private; 566 drm_i810_private_t *dev_priv = dev->dev_private;
569 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 567 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -594,7 +592,7 @@ static void i810EmitState(struct drm_device * dev)
594 592
595/* need to verify 593/* need to verify
596 */ 594 */
597static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, 595static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
598 unsigned int clear_color, 596 unsigned int clear_color,
599 unsigned int clear_zval) 597 unsigned int clear_zval)
600{ 598{
@@ -669,7 +667,7 @@ static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
669 } 667 }
670} 668}
671 669
672static void i810_dma_dispatch_swap(struct drm_device * dev) 670static void i810_dma_dispatch_swap(struct drm_device *dev)
673{ 671{
674 drm_i810_private_t *dev_priv = dev->dev_private; 672 drm_i810_private_t *dev_priv = dev->dev_private;
675 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; 673 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -715,8 +713,8 @@ static void i810_dma_dispatch_swap(struct drm_device * dev)
715 } 713 }
716} 714}
717 715
718static void i810_dma_dispatch_vertex(struct drm_device * dev, 716static void i810_dma_dispatch_vertex(struct drm_device *dev,
719 struct drm_buf * buf, int discard, int used) 717 struct drm_buf *buf, int discard, int used)
720{ 718{
721 drm_i810_private_t *dev_priv = dev->dev_private; 719 drm_i810_private_t *dev_priv = dev->dev_private;
722 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 720 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -795,7 +793,7 @@ static void i810_dma_dispatch_vertex(struct drm_device * dev,
795 } 793 }
796} 794}
797 795
798static void i810_dma_dispatch_flip(struct drm_device * dev) 796static void i810_dma_dispatch_flip(struct drm_device *dev)
799{ 797{
800 drm_i810_private_t *dev_priv = dev->dev_private; 798 drm_i810_private_t *dev_priv = dev->dev_private;
801 int pitch = dev_priv->pitch; 799 int pitch = dev_priv->pitch;
@@ -841,7 +839,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
841 839
842} 840}
843 841
844static void i810_dma_quiescent(struct drm_device * dev) 842static void i810_dma_quiescent(struct drm_device *dev)
845{ 843{
846 drm_i810_private_t *dev_priv = dev->dev_private; 844 drm_i810_private_t *dev_priv = dev->dev_private;
847 RING_LOCALS; 845 RING_LOCALS;
@@ -858,7 +856,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
858 i810_wait_ring(dev, dev_priv->ring.Size - 8); 856 i810_wait_ring(dev, dev_priv->ring.Size - 8);
859} 857}
860 858
861static int i810_flush_queue(struct drm_device * dev) 859static int i810_flush_queue(struct drm_device *dev)
862{ 860{
863 drm_i810_private_t *dev_priv = dev->dev_private; 861 drm_i810_private_t *dev_priv = dev->dev_private;
864 struct drm_device_dma *dma = dev->dma; 862 struct drm_device_dma *dma = dev->dma;
@@ -891,7 +889,7 @@ static int i810_flush_queue(struct drm_device * dev)
891} 889}
892 890
893/* Must be called with the lock held */ 891/* Must be called with the lock held */
894static void i810_reclaim_buffers(struct drm_device * dev, 892static void i810_reclaim_buffers(struct drm_device *dev,
895 struct drm_file *file_priv) 893 struct drm_file *file_priv)
896{ 894{
897 struct drm_device_dma *dma = dev->dma; 895 struct drm_device_dma *dma = dev->dma;
@@ -969,9 +967,8 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
969 LOCK_TEST_WITH_RETURN(dev, file_priv); 967 LOCK_TEST_WITH_RETURN(dev, file_priv);
970 968
971 /* GH: Someone's doing nasty things... */ 969 /* GH: Someone's doing nasty things... */
972 if (!dev->dev_private) { 970 if (!dev->dev_private)
973 return -EINVAL; 971 return -EINVAL;
974 }
975 972
976 i810_dma_dispatch_clear(dev, clear->flags, 973 i810_dma_dispatch_clear(dev, clear->flags,
977 clear->clear_color, clear->clear_depth); 974 clear->clear_color, clear->clear_depth);
@@ -1039,7 +1036,7 @@ static int i810_docopy(struct drm_device *dev, void *data,
1039 return 0; 1036 return 0;
1040} 1037}
1041 1038
1042static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, 1039static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
1043 unsigned int last_render) 1040 unsigned int last_render)
1044{ 1041{
1045 drm_i810_private_t *dev_priv = dev->dev_private; 1042 drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1053,9 +1050,8 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
1053 i810_kernel_lost_context(dev); 1050 i810_kernel_lost_context(dev);
1054 1051
1055 u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE); 1052 u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
1056 if (u != I810_BUF_CLIENT) { 1053 if (u != I810_BUF_CLIENT)
1057 DRM_DEBUG("MC found buffer that isn't mine!\n"); 1054 DRM_DEBUG("MC found buffer that isn't mine!\n");
1058 }
1059 1055
1060 if (used > 4 * 1024) 1056 if (used > 4 * 1024)
1061 used = 0; 1057 used = 0;
@@ -1160,7 +1156,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
1160 1156
1161 LOCK_TEST_WITH_RETURN(dev, file_priv); 1157 LOCK_TEST_WITH_RETURN(dev, file_priv);
1162 1158
1163 //Tell the overlay to update 1159 /* Tell the overlay to update */
1164 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); 1160 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
1165 1161
1166 return 0; 1162 return 0;
@@ -1168,7 +1164,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
1168 1164
1169/* Not sure why this isn't set all the time: 1165/* Not sure why this isn't set all the time:
1170 */ 1166 */
1171static void i810_do_init_pageflip(struct drm_device * dev) 1167static void i810_do_init_pageflip(struct drm_device *dev)
1172{ 1168{
1173 drm_i810_private_t *dev_priv = dev->dev_private; 1169 drm_i810_private_t *dev_priv = dev->dev_private;
1174 1170
@@ -1178,7 +1174,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
1178 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1174 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1179} 1175}
1180 1176
1181static int i810_do_cleanup_pageflip(struct drm_device * dev) 1177static int i810_do_cleanup_pageflip(struct drm_device *dev)
1182{ 1178{
1183 drm_i810_private_t *dev_priv = dev->dev_private; 1179 drm_i810_private_t *dev_priv = dev->dev_private;
1184 1180
@@ -1218,28 +1214,27 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
1218 return 0; 1214 return 0;
1219} 1215}
1220 1216
1221void i810_driver_lastclose(struct drm_device * dev) 1217void i810_driver_lastclose(struct drm_device *dev)
1222{ 1218{
1223 i810_dma_cleanup(dev); 1219 i810_dma_cleanup(dev);
1224} 1220}
1225 1221
1226void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1222void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1227{ 1223{
1228 if (dev->dev_private) { 1224 if (dev->dev_private) {
1229 drm_i810_private_t *dev_priv = dev->dev_private; 1225 drm_i810_private_t *dev_priv = dev->dev_private;
1230 if (dev_priv->page_flipping) { 1226 if (dev_priv->page_flipping)
1231 i810_do_cleanup_pageflip(dev); 1227 i810_do_cleanup_pageflip(dev);
1232 }
1233 } 1228 }
1234} 1229}
1235 1230
1236void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 1231void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
1237 struct drm_file *file_priv) 1232 struct drm_file *file_priv)
1238{ 1233{
1239 i810_reclaim_buffers(dev, file_priv); 1234 i810_reclaim_buffers(dev, file_priv);
1240} 1235}
1241 1236
1242int i810_driver_dma_quiescent(struct drm_device * dev) 1237int i810_driver_dma_quiescent(struct drm_device *dev)
1243{ 1238{
1244 i810_dma_quiescent(dev); 1239 i810_dma_quiescent(dev);
1245 return 0; 1240 return 0;
@@ -1276,7 +1271,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1276 * \returns 1271 * \returns
1277 * A value of 1 is always retured to indictate every i810 is AGP. 1272 * A value of 1 is always retured to indictate every i810 is AGP.
1278 */ 1273 */
1279int i810_driver_device_is_agp(struct drm_device * dev) 1274int i810_driver_device_is_agp(struct drm_device *dev)
1280{ 1275{
1281 return 1; 1276 return 1;
1282} 1277}
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 21e2691f28f9..0743fe90f1e3 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -115,16 +115,16 @@ typedef struct drm_i810_private {
115} drm_i810_private_t; 115} drm_i810_private_t;
116 116
117 /* i810_dma.c */ 117 /* i810_dma.c */
118extern int i810_driver_dma_quiescent(struct drm_device * dev); 118extern int i810_driver_dma_quiescent(struct drm_device *dev);
119extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 119extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
120 struct drm_file *file_priv); 120 struct drm_file *file_priv);
121extern int i810_driver_load(struct drm_device *, unsigned long flags); 121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(struct drm_device * dev); 122extern void i810_driver_lastclose(struct drm_device *dev);
123extern void i810_driver_preclose(struct drm_device * dev, 123extern void i810_driver_preclose(struct drm_device *dev,
124 struct drm_file *file_priv); 124 struct drm_file *file_priv);
125extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 125extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
126 struct drm_file *file_priv); 126 struct drm_file *file_priv);
127extern int i810_driver_device_is_agp(struct drm_device * dev); 127extern int i810_driver_device_is_agp(struct drm_device *dev);
128 128
129extern struct drm_ioctl_desc i810_ioctls[]; 129extern struct drm_ioctl_desc i810_ioctls[];
130extern int i810_max_ioctl; 130extern int i810_max_ioctl;
@@ -132,39 +132,41 @@ extern int i810_max_ioctl;
132#define I810_BASE(reg) ((unsigned long) \ 132#define I810_BASE(reg) ((unsigned long) \
133 dev_priv->mmio_map->handle) 133 dev_priv->mmio_map->handle)
134#define I810_ADDR(reg) (I810_BASE(reg) + reg) 134#define I810_ADDR(reg) (I810_BASE(reg) + reg)
135#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) 135#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg))
136#define I810_READ(reg) I810_DEREF(reg) 136#define I810_READ(reg) I810_DEREF(reg)
137#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) 137#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0)
138#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) 138#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg))
139#define I810_READ16(reg) I810_DEREF16(reg) 139#define I810_READ16(reg) I810_DEREF16(reg)
140#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) 140#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0)
141 141
142#define I810_VERBOSE 0 142#define I810_VERBOSE 0
143#define RING_LOCALS unsigned int outring, ringmask; \ 143#define RING_LOCALS unsigned int outring, ringmask; \
144 volatile char *virt; 144 volatile char *virt;
145 145
146#define BEGIN_LP_RING(n) do { \ 146#define BEGIN_LP_RING(n) do { \
147 if (I810_VERBOSE) \ 147 if (I810_VERBOSE) \
148 DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ 148 DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
149 if (dev_priv->ring.space < n*4) \ 149 if (dev_priv->ring.space < n*4) \
150 i810_wait_ring(dev, n*4); \ 150 i810_wait_ring(dev, n*4); \
151 dev_priv->ring.space -= n*4; \ 151 dev_priv->ring.space -= n*4; \
152 outring = dev_priv->ring.tail; \ 152 outring = dev_priv->ring.tail; \
153 ringmask = dev_priv->ring.tail_mask; \ 153 ringmask = dev_priv->ring.tail_mask; \
154 virt = dev_priv->ring.virtual_start; \ 154 virt = dev_priv->ring.virtual_start; \
155} while (0) 155} while (0)
156 156
157#define ADVANCE_LP_RING() do { \ 157#define ADVANCE_LP_RING() do { \
158 if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ 158 if (I810_VERBOSE) \
159 DRM_DEBUG("ADVANCE_LP_RING\n"); \
159 dev_priv->ring.tail = outring; \ 160 dev_priv->ring.tail = outring; \
160 I810_WRITE(LP_RING + RING_TAIL, outring); \ 161 I810_WRITE(LP_RING + RING_TAIL, outring); \
161} while(0) 162} while (0)
162 163
163#define OUT_RING(n) do { \ 164#define OUT_RING(n) do { \
164 if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ 165 if (I810_VERBOSE) \
165 *(volatile unsigned int *)(virt + outring) = n; \ 166 DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
166 outring += 4; \ 167 *(volatile unsigned int *)(virt + outring) = n; \
167 outring &= ringmask; \ 168 outring += 4; \
169 outring &= ringmask; \
168} while (0) 170} while (0)
169 171
170#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) 172#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 65759a9a85c8..7ee85ea507ce 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -62,9 +62,8 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev)
62 /* In use is already a pointer */ 62 /* In use is already a pointer */
63 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE, 63 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
64 I830_BUF_CLIENT); 64 I830_BUF_CLIENT);
65 if (used == I830_BUF_FREE) { 65 if (used == I830_BUF_FREE)
66 return buf; 66 return buf;
67 }
68 } 67 }
69 return NULL; 68 return NULL;
70} 69}
@@ -73,7 +72,7 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev)
73 * yet, the hardware updates in use for us once its on the ring buffer. 72 * yet, the hardware updates in use for us once its on the ring buffer.
74 */ 73 */
75 74
76static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf) 75static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
77{ 76{
78 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 77 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
79 int used; 78 int used;
@@ -123,7 +122,7 @@ static const struct file_operations i830_buffer_fops = {
123 .fasync = drm_fasync, 122 .fasync = drm_fasync,
124}; 123};
125 124
126static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) 125static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
127{ 126{
128 struct drm_device *dev = file_priv->minor->dev; 127 struct drm_device *dev = file_priv->minor->dev;
129 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 128 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
@@ -156,7 +155,7 @@ static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
156 return retcode; 155 return retcode;
157} 156}
158 157
159static int i830_unmap_buffer(struct drm_buf * buf) 158static int i830_unmap_buffer(struct drm_buf *buf)
160{ 159{
161 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 160 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
162 int retcode = 0; 161 int retcode = 0;
@@ -176,7 +175,7 @@ static int i830_unmap_buffer(struct drm_buf * buf)
176 return retcode; 175 return retcode;
177} 176}
178 177
179static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d, 178static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
180 struct drm_file *file_priv) 179 struct drm_file *file_priv)
181{ 180{
182 struct drm_buf *buf; 181 struct drm_buf *buf;
@@ -206,7 +205,7 @@ static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
206 return retcode; 205 return retcode;
207} 206}
208 207
209static int i830_dma_cleanup(struct drm_device * dev) 208static int i830_dma_cleanup(struct drm_device *dev)
210{ 209{
211 struct drm_device_dma *dma = dev->dma; 210 struct drm_device_dma *dma = dev->dma;
212 211
@@ -222,9 +221,8 @@ static int i830_dma_cleanup(struct drm_device * dev)
222 drm_i830_private_t *dev_priv = 221 drm_i830_private_t *dev_priv =
223 (drm_i830_private_t *) dev->dev_private; 222 (drm_i830_private_t *) dev->dev_private;
224 223
225 if (dev_priv->ring.virtual_start) { 224 if (dev_priv->ring.virtual_start)
226 drm_core_ioremapfree(&dev_priv->ring.map, dev); 225 drm_core_ioremapfree(&dev_priv->ring.map, dev);
227 }
228 if (dev_priv->hw_status_page) { 226 if (dev_priv->hw_status_page) {
229 pci_free_consistent(dev->pdev, PAGE_SIZE, 227 pci_free_consistent(dev->pdev, PAGE_SIZE,
230 dev_priv->hw_status_page, 228 dev_priv->hw_status_page,
@@ -246,7 +244,7 @@ static int i830_dma_cleanup(struct drm_device * dev)
246 return 0; 244 return 0;
247} 245}
248 246
249int i830_wait_ring(struct drm_device * dev, int n, const char *caller) 247int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
250{ 248{
251 drm_i830_private_t *dev_priv = dev->dev_private; 249 drm_i830_private_t *dev_priv = dev->dev_private;
252 drm_i830_ring_buffer_t *ring = &(dev_priv->ring); 250 drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -276,11 +274,11 @@ int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
276 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; 274 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
277 } 275 }
278 276
279 out_wait_ring: 277out_wait_ring:
280 return iters; 278 return iters;
281} 279}
282 280
283static void i830_kernel_lost_context(struct drm_device * dev) 281static void i830_kernel_lost_context(struct drm_device *dev)
284{ 282{
285 drm_i830_private_t *dev_priv = dev->dev_private; 283 drm_i830_private_t *dev_priv = dev->dev_private;
286 drm_i830_ring_buffer_t *ring = &(dev_priv->ring); 284 drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -295,7 +293,7 @@ static void i830_kernel_lost_context(struct drm_device * dev)
295 dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; 293 dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
296} 294}
297 295
298static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv) 296static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
299{ 297{
300 struct drm_device_dma *dma = dev->dma; 298 struct drm_device_dma *dma = dev->dma;
301 int my_idx = 36; 299 int my_idx = 36;
@@ -329,9 +327,9 @@ static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_
329 return 0; 327 return 0;
330} 328}
331 329
332static int i830_dma_initialize(struct drm_device * dev, 330static int i830_dma_initialize(struct drm_device *dev,
333 drm_i830_private_t * dev_priv, 331 drm_i830_private_t *dev_priv,
334 drm_i830_init_t * init) 332 drm_i830_init_t *init)
335{ 333{
336 struct drm_map_list *r_list; 334 struct drm_map_list *r_list;
337 335
@@ -482,7 +480,7 @@ static int i830_dma_init(struct drm_device *dev, void *data,
482/* Most efficient way to verify state for the i830 is as it is 480/* Most efficient way to verify state for the i830 is as it is
483 * emitted. Non-conformant state is silently dropped. 481 * emitted. Non-conformant state is silently dropped.
484 */ 482 */
485static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code) 483static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
486{ 484{
487 drm_i830_private_t *dev_priv = dev->dev_private; 485 drm_i830_private_t *dev_priv = dev->dev_private;
488 int i, j = 0; 486 int i, j = 0;
@@ -527,7 +525,7 @@ static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
527 ADVANCE_LP_RING(); 525 ADVANCE_LP_RING();
528} 526}
529 527
530static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code) 528static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
531{ 529{
532 drm_i830_private_t *dev_priv = dev->dev_private; 530 drm_i830_private_t *dev_priv = dev->dev_private;
533 int i, j = 0; 531 int i, j = 0;
@@ -561,7 +559,7 @@ static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
561 printk("rejected packet %x\n", code[0]); 559 printk("rejected packet %x\n", code[0]);
562} 560}
563 561
564static void i830EmitTexBlendVerified(struct drm_device * dev, 562static void i830EmitTexBlendVerified(struct drm_device *dev,
565 unsigned int *code, unsigned int num) 563 unsigned int *code, unsigned int num)
566{ 564{
567 drm_i830_private_t *dev_priv = dev->dev_private; 565 drm_i830_private_t *dev_priv = dev->dev_private;
@@ -586,7 +584,7 @@ static void i830EmitTexBlendVerified(struct drm_device * dev,
586 ADVANCE_LP_RING(); 584 ADVANCE_LP_RING();
587} 585}
588 586
589static void i830EmitTexPalette(struct drm_device * dev, 587static void i830EmitTexPalette(struct drm_device *dev,
590 unsigned int *palette, int number, int is_shared) 588 unsigned int *palette, int number, int is_shared)
591{ 589{
592 drm_i830_private_t *dev_priv = dev->dev_private; 590 drm_i830_private_t *dev_priv = dev->dev_private;
@@ -603,9 +601,8 @@ static void i830EmitTexPalette(struct drm_device * dev,
603 } else { 601 } else {
604 OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number)); 602 OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
605 } 603 }
606 for (i = 0; i < 256; i++) { 604 for (i = 0; i < 256; i++)
607 OUT_RING(palette[i]); 605 OUT_RING(palette[i]);
608 }
609 OUT_RING(0); 606 OUT_RING(0);
610 /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! 607 /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
611 */ 608 */
@@ -613,7 +610,7 @@ static void i830EmitTexPalette(struct drm_device * dev,
613 610
614/* Need to do some additional checking when setting the dest buffer. 611/* Need to do some additional checking when setting the dest buffer.
615 */ 612 */
616static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code) 613static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
617{ 614{
618 drm_i830_private_t *dev_priv = dev->dev_private; 615 drm_i830_private_t *dev_priv = dev->dev_private;
619 unsigned int tmp; 616 unsigned int tmp;
@@ -674,7 +671,7 @@ static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
674 ADVANCE_LP_RING(); 671 ADVANCE_LP_RING();
675} 672}
676 673
677static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code) 674static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
678{ 675{
679 drm_i830_private_t *dev_priv = dev->dev_private; 676 drm_i830_private_t *dev_priv = dev->dev_private;
680 RING_LOCALS; 677 RING_LOCALS;
@@ -685,7 +682,7 @@ static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
685 ADVANCE_LP_RING(); 682 ADVANCE_LP_RING();
686} 683}
687 684
688static void i830EmitState(struct drm_device * dev) 685static void i830EmitState(struct drm_device *dev)
689{ 686{
690 drm_i830_private_t *dev_priv = dev->dev_private; 687 drm_i830_private_t *dev_priv = dev->dev_private;
691 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; 688 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -788,7 +785,7 @@ static void i830EmitState(struct drm_device * dev)
788 * Performance monitoring functions 785 * Performance monitoring functions
789 */ 786 */
790 787
791static void i830_fill_box(struct drm_device * dev, 788static void i830_fill_box(struct drm_device *dev,
792 int x, int y, int w, int h, int r, int g, int b) 789 int x, int y, int w, int h, int r, int g, int b)
793{ 790{
794 drm_i830_private_t *dev_priv = dev->dev_private; 791 drm_i830_private_t *dev_priv = dev->dev_private;
@@ -816,17 +813,16 @@ static void i830_fill_box(struct drm_device * dev,
816 OUT_RING((y << 16) | x); 813 OUT_RING((y << 16) | x);
817 OUT_RING(((y + h) << 16) | (x + w)); 814 OUT_RING(((y + h) << 16) | (x + w));
818 815
819 if (dev_priv->current_page == 1) { 816 if (dev_priv->current_page == 1)
820 OUT_RING(dev_priv->front_offset); 817 OUT_RING(dev_priv->front_offset);
821 } else { 818 else
822 OUT_RING(dev_priv->back_offset); 819 OUT_RING(dev_priv->back_offset);
823 }
824 820
825 OUT_RING(color); 821 OUT_RING(color);
826 ADVANCE_LP_RING(); 822 ADVANCE_LP_RING();
827} 823}
828 824
829static void i830_cp_performance_boxes(struct drm_device * dev) 825static void i830_cp_performance_boxes(struct drm_device *dev)
830{ 826{
831 drm_i830_private_t *dev_priv = dev->dev_private; 827 drm_i830_private_t *dev_priv = dev->dev_private;
832 828
@@ -871,7 +867,7 @@ static void i830_cp_performance_boxes(struct drm_device * dev)
871 dev_priv->sarea_priv->perf_boxes = 0; 867 dev_priv->sarea_priv->perf_boxes = 0;
872} 868}
873 869
874static void i830_dma_dispatch_clear(struct drm_device * dev, int flags, 870static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
875 unsigned int clear_color, 871 unsigned int clear_color,
876 unsigned int clear_zval, 872 unsigned int clear_zval,
877 unsigned int clear_depthmask) 873 unsigned int clear_depthmask)
@@ -966,7 +962,7 @@ static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
966 } 962 }
967} 963}
968 964
969static void i830_dma_dispatch_swap(struct drm_device * dev) 965static void i830_dma_dispatch_swap(struct drm_device *dev)
970{ 966{
971 drm_i830_private_t *dev_priv = dev->dev_private; 967 drm_i830_private_t *dev_priv = dev->dev_private;
972 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; 968 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -1036,7 +1032,7 @@ static void i830_dma_dispatch_swap(struct drm_device * dev)
1036 } 1032 }
1037} 1033}
1038 1034
1039static void i830_dma_dispatch_flip(struct drm_device * dev) 1035static void i830_dma_dispatch_flip(struct drm_device *dev)
1040{ 1036{
1041 drm_i830_private_t *dev_priv = dev->dev_private; 1037 drm_i830_private_t *dev_priv = dev->dev_private;
1042 RING_LOCALS; 1038 RING_LOCALS;
@@ -1079,8 +1075,8 @@ static void i830_dma_dispatch_flip(struct drm_device * dev)
1079 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1075 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1080} 1076}
1081 1077
1082static void i830_dma_dispatch_vertex(struct drm_device * dev, 1078static void i830_dma_dispatch_vertex(struct drm_device *dev,
1083 struct drm_buf * buf, int discard, int used) 1079 struct drm_buf *buf, int discard, int used)
1084{ 1080{
1085 drm_i830_private_t *dev_priv = dev->dev_private; 1081 drm_i830_private_t *dev_priv = dev->dev_private;
1086 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 1082 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
@@ -1100,9 +1096,8 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev,
1100 if (discard) { 1096 if (discard) {
1101 u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, 1097 u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1102 I830_BUF_HARDWARE); 1098 I830_BUF_HARDWARE);
1103 if (u != I830_BUF_CLIENT) { 1099 if (u != I830_BUF_CLIENT)
1104 DRM_DEBUG("xxxx 2\n"); 1100 DRM_DEBUG("xxxx 2\n");
1105 }
1106 } 1101 }
1107 1102
1108 if (used > 4 * 1023) 1103 if (used > 4 * 1023)
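
The cmpxchg in the hunk above is a lock-free ownership handoff: the buffer is marked as hardware-owned only if it is still client-owned, and the old value tells the caller whether that was the case. A stand-alone sketch of the same pattern using C11 atomics (names are illustrative, not the DRM helpers):

#include <stdatomic.h>
#include <stdio.h>

enum { BUF_FREE, BUF_CLIENT, BUF_HARDWARE };

/* Claim the buffer for the hardware only if the client still owns it;
 * returns the owner that was actually observed, like cmpxchg() above. */
static int claim_for_hardware(_Atomic int *in_use)
{
	int expected = BUF_CLIENT;

	atomic_compare_exchange_strong(in_use, &expected, BUF_HARDWARE);
	return expected;	/* BUF_CLIENT on success, the other owner on failure */
}

int main(void)
{
	_Atomic int in_use = BUF_CLIENT;

	if (claim_for_hardware(&in_use) != BUF_CLIENT)
		printf("buffer was not client-owned\n");
	else
		printf("handed off to hardware\n");
	return 0;
}
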
@@ -1191,7 +1186,7 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev,
1191 } 1186 }
1192} 1187}
1193 1188
1194static void i830_dma_quiescent(struct drm_device * dev) 1189static void i830_dma_quiescent(struct drm_device *dev)
1195{ 1190{
1196 drm_i830_private_t *dev_priv = dev->dev_private; 1191 drm_i830_private_t *dev_priv = dev->dev_private;
1197 RING_LOCALS; 1192 RING_LOCALS;
@@ -1208,7 +1203,7 @@ static void i830_dma_quiescent(struct drm_device * dev)
1208 i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__); 1203 i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
1209} 1204}
1210 1205
1211static int i830_flush_queue(struct drm_device * dev) 1206static int i830_flush_queue(struct drm_device *dev)
1212{ 1207{
1213 drm_i830_private_t *dev_priv = dev->dev_private; 1208 drm_i830_private_t *dev_priv = dev->dev_private;
1214 struct drm_device_dma *dma = dev->dma; 1209 struct drm_device_dma *dma = dev->dma;
@@ -1241,7 +1236,7 @@ static int i830_flush_queue(struct drm_device * dev)
1241} 1236}
1242 1237
1243/* Must be called with the lock held */ 1238/* Must be called with the lock held */
1244static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv) 1239static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1245{ 1240{
1246 struct drm_device_dma *dma = dev->dma; 1241 struct drm_device_dma *dma = dev->dma;
1247 int i; 1242 int i;
@@ -1316,9 +1311,8 @@ static int i830_clear_bufs(struct drm_device *dev, void *data,
1316 LOCK_TEST_WITH_RETURN(dev, file_priv); 1311 LOCK_TEST_WITH_RETURN(dev, file_priv);
1317 1312
1318 /* GH: Someone's doing nasty things... */ 1313 /* GH: Someone's doing nasty things... */
1319 if (!dev->dev_private) { 1314 if (!dev->dev_private)
1320 return -EINVAL; 1315 return -EINVAL;
1321 }
1322 1316
1323 i830_dma_dispatch_clear(dev, clear->flags, 1317 i830_dma_dispatch_clear(dev, clear->flags,
1324 clear->clear_color, 1318 clear->clear_color,
@@ -1339,7 +1333,7 @@ static int i830_swap_bufs(struct drm_device *dev, void *data,
1339 1333
1340/* Not sure why this isn't set all the time: 1334/* Not sure why this isn't set all the time:
1341 */ 1335 */
1342static void i830_do_init_pageflip(struct drm_device * dev) 1336static void i830_do_init_pageflip(struct drm_device *dev)
1343{ 1337{
1344 drm_i830_private_t *dev_priv = dev->dev_private; 1338 drm_i830_private_t *dev_priv = dev->dev_private;
1345 1339
@@ -1349,7 +1343,7 @@ static void i830_do_init_pageflip(struct drm_device * dev)
1349 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1343 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1350} 1344}
1351 1345
1352static int i830_do_cleanup_pageflip(struct drm_device * dev) 1346static int i830_do_cleanup_pageflip(struct drm_device *dev)
1353{ 1347{
1354 drm_i830_private_t *dev_priv = dev->dev_private; 1348 drm_i830_private_t *dev_priv = dev->dev_private;
1355 1349
@@ -1490,27 +1484,26 @@ int i830_driver_load(struct drm_device *dev, unsigned long flags)
1490 return 0; 1484 return 0;
1491} 1485}
1492 1486
1493void i830_driver_lastclose(struct drm_device * dev) 1487void i830_driver_lastclose(struct drm_device *dev)
1494{ 1488{
1495 i830_dma_cleanup(dev); 1489 i830_dma_cleanup(dev);
1496} 1490}
1497 1491
1498void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1492void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1499{ 1493{
1500 if (dev->dev_private) { 1494 if (dev->dev_private) {
1501 drm_i830_private_t *dev_priv = dev->dev_private; 1495 drm_i830_private_t *dev_priv = dev->dev_private;
1502 if (dev_priv->page_flipping) { 1496 if (dev_priv->page_flipping)
1503 i830_do_cleanup_pageflip(dev); 1497 i830_do_cleanup_pageflip(dev);
1504 }
1505 } 1498 }
1506} 1499}
1507 1500
1508void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv) 1501void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
1509{ 1502{
1510 i830_reclaim_buffers(dev, file_priv); 1503 i830_reclaim_buffers(dev, file_priv);
1511} 1504}
1512 1505
1513int i830_driver_dma_quiescent(struct drm_device * dev) 1506int i830_driver_dma_quiescent(struct drm_device *dev)
1514{ 1507{
1515 i830_dma_quiescent(dev); 1508 i830_dma_quiescent(dev);
1516 return 0; 1509 return 0;
@@ -1546,7 +1539,7 @@ int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
1546 * \returns 1539 * \returns
 1547 * A value of 1 is always returned to indicate every i8xx is AGP. 1540 * A value of 1 is always returned to indicate every i8xx is AGP.
1548 */ 1541 */
1549int i830_driver_device_is_agp(struct drm_device * dev) 1542int i830_driver_device_is_agp(struct drm_device *dev)
1550{ 1543{
1551 return 1; 1544 return 1;
1552} 1545}
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
index da82afe4ded5..ecfd25a35da3 100644
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ b/drivers/gpu/drm/i830/i830_drv.h
@@ -132,33 +132,33 @@ extern int i830_irq_wait(struct drm_device *dev, void *data,
132 struct drm_file *file_priv); 132 struct drm_file *file_priv);
133 133
134extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); 134extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
135extern void i830_driver_irq_preinstall(struct drm_device * dev); 135extern void i830_driver_irq_preinstall(struct drm_device *dev);
136extern void i830_driver_irq_postinstall(struct drm_device * dev); 136extern void i830_driver_irq_postinstall(struct drm_device *dev);
137extern void i830_driver_irq_uninstall(struct drm_device * dev); 137extern void i830_driver_irq_uninstall(struct drm_device *dev);
138extern int i830_driver_load(struct drm_device *, unsigned long flags); 138extern int i830_driver_load(struct drm_device *, unsigned long flags);
139extern void i830_driver_preclose(struct drm_device * dev, 139extern void i830_driver_preclose(struct drm_device *dev,
140 struct drm_file *file_priv); 140 struct drm_file *file_priv);
141extern void i830_driver_lastclose(struct drm_device * dev); 141extern void i830_driver_lastclose(struct drm_device *dev);
142extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev, 142extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
143 struct drm_file *file_priv); 143 struct drm_file *file_priv);
144extern int i830_driver_dma_quiescent(struct drm_device * dev); 144extern int i830_driver_dma_quiescent(struct drm_device *dev);
145extern int i830_driver_device_is_agp(struct drm_device * dev); 145extern int i830_driver_device_is_agp(struct drm_device *dev);
146 146
147#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) 147#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
148#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val) 148#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
149#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg) 149#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
150#define I830_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val) 150#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
151 151
152#define I830_VERBOSE 0 152#define I830_VERBOSE 0
153 153
154#define RING_LOCALS unsigned int outring, ringmask, outcount; \ 154#define RING_LOCALS unsigned int outring, ringmask, outcount; \
155 volatile char *virt; 155 volatile char *virt;
156 156
157#define BEGIN_LP_RING(n) do { \ 157#define BEGIN_LP_RING(n) do { \
158 if (I830_VERBOSE) \ 158 if (I830_VERBOSE) \
159 printk("BEGIN_LP_RING(%d)\n", (n)); \ 159 printk("BEGIN_LP_RING(%d)\n", (n)); \
160 if (dev_priv->ring.space < n*4) \ 160 if (dev_priv->ring.space < n*4) \
161 i830_wait_ring(dev, n*4, __func__); \ 161 i830_wait_ring(dev, n*4, __func__); \
162 outcount = 0; \ 162 outcount = 0; \
163 outring = dev_priv->ring.tail; \ 163 outring = dev_priv->ring.tail; \
164 ringmask = dev_priv->ring.tail_mask; \ 164 ringmask = dev_priv->ring.tail_mask; \
@@ -166,21 +166,23 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
166} while (0) 166} while (0)
167 167
168#define OUT_RING(n) do { \ 168#define OUT_RING(n) do { \
169 if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \ 169 if (I830_VERBOSE) \
170 printk(" OUT_RING %x\n", (int)(n)); \
170 *(volatile unsigned int *)(virt + outring) = n; \ 171 *(volatile unsigned int *)(virt + outring) = n; \
171 outcount++; \ 172 outcount++; \
172 outring += 4; \ 173 outring += 4; \
173 outring &= ringmask; \ 174 outring &= ringmask; \
174} while (0) 175} while (0)
175 176
176#define ADVANCE_LP_RING() do { \ 177#define ADVANCE_LP_RING() do { \
177 if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \ 178 if (I830_VERBOSE) \
178 dev_priv->ring.tail = outring; \ 179 printk("ADVANCE_LP_RING %x\n", outring); \
179 dev_priv->ring.space -= outcount * 4; \ 180 dev_priv->ring.tail = outring; \
180 I830_WRITE(LP_RING + RING_TAIL, outring); \ 181 dev_priv->ring.space -= outcount * 4; \
181} while(0) 182 I830_WRITE(LP_RING + RING_TAIL, outring); \
183} while (0)
182 184
183extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller); 185extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
184 186
185#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) 187#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
186#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) 188#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
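
The reformatted BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING macros above follow the usual ring-buffer emit pattern: reserve space, store dwords at a wrapping tail offset, then publish the new tail (in the driver, via the RING_TAIL register write). A minimal user-space sketch of that pattern, with illustrative names and sizes rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64			/* bytes; power of two so the mask works */

struct ring {
	uint32_t buf[RING_SIZE / 4];
	unsigned int tail;		/* byte offset of the next write */
	unsigned int tail_mask;		/* RING_SIZE - 1 */
	unsigned int space;		/* bytes believed free */
};

/* OUT_RING equivalent: store one dword and advance the local offset. */
static void out_ring(struct ring *r, unsigned int *out, uint32_t val)
{
	r->buf[*out / 4] = val;
	*out = (*out + 4) & r->tail_mask;
}

/* ADVANCE_LP_RING equivalent: publish the tail so the consumer sees the data. */
static void advance_ring(struct ring *r, unsigned int out, unsigned int count)
{
	r->tail = out;
	r->space -= count * 4;
}

int main(void)
{
	struct ring r = { .tail = 0, .tail_mask = RING_SIZE - 1, .space = RING_SIZE };
	unsigned int out = r.tail, count = 0;

	out_ring(&r, &out, 0x02000000);	/* e.g. a command header */
	count++;
	out_ring(&r, &out, 0x0);	/* e.g. its operand */
	count++;
	advance_ring(&r, out, count);
	printf("tail=0x%x space=%u\n", r.tail, r.space);
	return 0;
}

Until the advance step runs, the stored dwords never become visible to the consumer, which is exactly what the KW comment in i830EmitTexPalette above is pointing out.
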
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
index 91ec2bb497e9..d1a6b95d631d 100644
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ b/drivers/gpu/drm/i830/i830_irq.c
@@ -53,7 +53,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
53 return IRQ_HANDLED; 53 return IRQ_HANDLED;
54} 54}
55 55
56static int i830_emit_irq(struct drm_device * dev) 56static int i830_emit_irq(struct drm_device *dev)
57{ 57{
58 drm_i830_private_t *dev_priv = dev->dev_private; 58 drm_i830_private_t *dev_priv = dev->dev_private;
59 RING_LOCALS; 59 RING_LOCALS;
@@ -70,7 +70,7 @@ static int i830_emit_irq(struct drm_device * dev)
70 return atomic_read(&dev_priv->irq_emitted); 70 return atomic_read(&dev_priv->irq_emitted);
71} 71}
72 72
73static int i830_wait_irq(struct drm_device * dev, int irq_nr) 73static int i830_wait_irq(struct drm_device *dev, int irq_nr)
74{ 74{
75 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 75 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
76 DECLARE_WAITQUEUE(entry, current); 76 DECLARE_WAITQUEUE(entry, current);
@@ -156,7 +156,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
156 156
157/* drm_dma.h hooks 157/* drm_dma.h hooks
158*/ 158*/
159void i830_driver_irq_preinstall(struct drm_device * dev) 159void i830_driver_irq_preinstall(struct drm_device *dev)
160{ 160{
161 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 161 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
162 162
@@ -168,14 +168,14 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
168 init_waitqueue_head(&dev_priv->irq_queue); 168 init_waitqueue_head(&dev_priv->irq_queue);
169} 169}
170 170
171void i830_driver_irq_postinstall(struct drm_device * dev) 171void i830_driver_irq_postinstall(struct drm_device *dev)
172{ 172{
173 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 173 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
174 174
175 I830_WRITE16(I830REG_INT_ENABLE_R, 0x2); 175 I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
176} 176}
177 177
178void i830_driver_irq_uninstall(struct drm_device * dev) 178void i830_driver_irq_uninstall(struct drm_device *dev)
179{ 179{
180 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 180 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
181 if (!dev_priv) 181 if (!dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 75061b305b8c..15d2d93aaca9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2241 page = read_cache_page_gfp(mapping, i, 2241 page = read_cache_page_gfp(mapping, i,
2242 GFP_HIGHUSER | 2242 GFP_HIGHUSER |
2243 __GFP_COLD | 2243 __GFP_COLD |
2244 __GFP_RECLAIMABLE |
2244 gfpmask); 2245 gfpmask);
2245 if (IS_ERR(page)) 2246 if (IS_ERR(page))
2246 goto err_pages; 2247 goto err_pages;
@@ -4739,6 +4740,16 @@ i915_gem_load(struct drm_device *dev)
4739 list_add(&dev_priv->mm.shrink_list, &shrink_list); 4740 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4740 spin_unlock(&shrink_list_lock); 4741 spin_unlock(&shrink_list_lock);
4741 4742
4743 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4744 if (IS_GEN3(dev)) {
4745 u32 tmp = I915_READ(MI_ARB_STATE);
4746 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4747 /* arb state is a masked write, so set bit + bit in mask */
4748 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4749 I915_WRITE(MI_ARB_STATE, tmp);
4750 }
4751 }
4752
4742 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4753 /* Old X drivers will take 0-2 for front, back, depth buffers */
4743 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4754 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4744 dev_priv->fence_reg_start = 3; 4755 dev_priv->fence_reg_start = 3;
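
The added comment calls MI_ARB_STATE a masked write: the upper 16 bits select which of the lower 16 bits the write may change, so enabling a bit means writing it in both halves. A small sketch of that convention (plain C; the register name is only a stand-in):

#include <stdint.h>
#include <stdio.h>

#define MASK_SHIFT 16				/* matches MI_ARB_MASK_SHIFT in the hunk */

/* Set 'bits': put them in the value half and in the mask half. */
static uint32_t masked_set(uint32_t bits)
{
	return bits | (bits << MASK_SHIFT);
}

/* Clear 'bits': name them in the mask half only, leave the value half zero. */
static uint32_t masked_clear(uint32_t bits)
{
	return bits << MASK_SHIFT;
}

int main(void)
{
	uint32_t c3_lp = 1u << 11;		/* MI_ARB_C3_LP_WRITE_ENABLE */

	printf("set:   0x%08x\n", masked_set(c3_lp));	/* 0x08000800 */
	printf("clear: 0x%08x\n", masked_clear(c3_lp));	/* 0x08000000 */
	return 0;
}
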
@@ -4975,7 +4986,7 @@ i915_gpu_is_active(struct drm_device *dev)
4975} 4986}
4976 4987
4977static int 4988static int
4978i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) 4989i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
4979{ 4990{
4980 drm_i915_private_t *dev_priv, *next_dev; 4991 drm_i915_private_t *dev_priv, *next_dev;
4981 struct drm_i915_gem_object *obj_priv, *next_obj; 4992 struct drm_i915_gem_object *obj_priv, *next_obj;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 150400f40534..6d9b0288272a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,70 @@
359#define LM_BURST_LENGTH 0x00000700 359#define LM_BURST_LENGTH 0x00000700
360#define LM_FIFO_WATERMARK 0x0000001F 360#define LM_FIFO_WATERMARK 0x0000001F
361#define MI_ARB_STATE 0x020e4 /* 915+ only */ 361#define MI_ARB_STATE 0x020e4 /* 915+ only */
362#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
363
364/* Make render/texture TLB fetches lower priority than associated data
365 * fetches. This is not turned on by default
366 */
367#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
368
369/* Isoch request wait on GTT enable (Display A/B/C streams).
370 * Make isoch requests stall on the TLB update. May cause
371 * display underruns (test mode only)
372 */
373#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
374
375/* Block grant count for isoch requests when block count is
376 * set to a finite value.
377 */
378#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
379#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
380#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
381#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
382#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
383
384/* Enable render writes to complete in C2/C3/C4 power states.
385 * If this isn't enabled, render writes are prevented in low
386 * power states. That seems bad to me.
387 */
388#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
389
390/* This acknowledges an async flip immediately instead
391 * of waiting for 2 TLB fetches.
392 */
393#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
394
395/* Enables non-sequential data reads through arbiter
396 */
397#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
398
399/* Disable FSB snooping of cacheable write cycles from binner/render
400 * command stream
401 */
402#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
403
404/* Arbiter time slice for non-isoch streams */
405#define MI_ARB_TIME_SLICE_MASK (7 << 5)
406#define MI_ARB_TIME_SLICE_1 (0 << 5)
407#define MI_ARB_TIME_SLICE_2 (1 << 5)
408#define MI_ARB_TIME_SLICE_4 (2 << 5)
409#define MI_ARB_TIME_SLICE_6 (3 << 5)
410#define MI_ARB_TIME_SLICE_8 (4 << 5)
411#define MI_ARB_TIME_SLICE_10 (5 << 5)
412#define MI_ARB_TIME_SLICE_14 (6 << 5)
413#define MI_ARB_TIME_SLICE_16 (7 << 5)
414
415/* Low priority grace period page size */
416#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
417#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
418
419/* Disable display A/B trickle feed */
420#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
421
422/* Set display plane priority */
423#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
424#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
425
362#define CACHE_MODE_0 0x02120 /* 915+ only */ 426#define CACHE_MODE_0 0x02120 /* 915+ only */
363#define CM0_MASK_SHIFT 16 427#define CM0_MASK_SHIFT 16
364#define CM0_IZ_OPT_DISABLE (1<<6) 428#define CM0_IZ_OPT_DISABLE (1<<6)
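
For the multi-bit fields defined above (block grant, time slice, grace period), the same masked-write convention applies: the whole field mask goes in the upper half and the chosen encoding in the lower half, so unrelated arbiter bits stay untouched. A hedged sketch that reuses values from the hunk as local constants:

#include <stdint.h>
#include <stdio.h>

#define ARB_MASK_SHIFT		16		/* MI_ARB_MASK_SHIFT */
#define ARB_TIME_SLICE_MASK	(7u << 5)	/* MI_ARB_TIME_SLICE_MASK */
#define ARB_TIME_SLICE_8	(4u << 5)	/* MI_ARB_TIME_SLICE_8 */

/* Build the write that programs one field without disturbing the rest. */
static uint32_t masked_field(uint32_t field_mask, uint32_t encoding)
{
	return (field_mask << ARB_MASK_SHIFT) | encoding;
}

int main(void)
{
	/* 0x00e00080: mask bits 7:5 in the top half, the slice-8 encoding below. */
	printf("0x%08x\n", masked_field(ARB_TIME_SLICE_MASK, ARB_TIME_SLICE_8));
	return 0;
}
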
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index ccc129c328a4..08868ac3048a 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -52,7 +52,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
52 * Engine control 52 * Engine control
53 */ 53 */
54 54
55int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) 55int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
56{ 56{
57 u32 status = 0; 57 u32 status = 0;
58 int i; 58 int i;
@@ -74,7 +74,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
74 return -EBUSY; 74 return -EBUSY;
75} 75}
76 76
77static int mga_do_dma_reset(drm_mga_private_t * dev_priv) 77static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
78{ 78{
79 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 79 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
80 drm_mga_primary_buffer_t *primary = &dev_priv->prim; 80 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
@@ -102,7 +102,7 @@ static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
102 * Primary DMA stream 102 * Primary DMA stream
103 */ 103 */
104 104
105void mga_do_dma_flush(drm_mga_private_t * dev_priv) 105void mga_do_dma_flush(drm_mga_private_t *dev_priv)
106{ 106{
107 drm_mga_primary_buffer_t *primary = &dev_priv->prim; 107 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
108 u32 head, tail; 108 u32 head, tail;
@@ -142,11 +142,10 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
142 142
143 head = MGA_READ(MGA_PRIMADDRESS); 143 head = MGA_READ(MGA_PRIMADDRESS);
144 144
145 if (head <= tail) { 145 if (head <= tail)
146 primary->space = primary->size - primary->tail; 146 primary->space = primary->size - primary->tail;
147 } else { 147 else
148 primary->space = head - tail; 148 primary->space = head - tail;
149 }
150 149
151 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); 150 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
152 DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset)); 151 DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
@@ -158,7 +157,7 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
158 DRM_DEBUG("done.\n"); 157 DRM_DEBUG("done.\n");
159} 158}
160 159
161void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) 160void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
162{ 161{
163 drm_mga_primary_buffer_t *primary = &dev_priv->prim; 162 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
164 u32 head, tail; 163 u32 head, tail;
@@ -181,11 +180,10 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
181 180
182 head = MGA_READ(MGA_PRIMADDRESS); 181 head = MGA_READ(MGA_PRIMADDRESS);
183 182
184 if (head == dev_priv->primary->offset) { 183 if (head == dev_priv->primary->offset)
185 primary->space = primary->size; 184 primary->space = primary->size;
186 } else { 185 else
187 primary->space = head - dev_priv->primary->offset; 186 primary->space = head - dev_priv->primary->offset;
188 }
189 187
190 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); 188 DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
191 DRM_DEBUG(" tail = 0x%06x\n", primary->tail); 189 DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
@@ -199,7 +197,7 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
199 DRM_DEBUG("done.\n"); 197 DRM_DEBUG("done.\n");
200} 198}
201 199
202void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) 200void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
203{ 201{
204 drm_mga_primary_buffer_t *primary = &dev_priv->prim; 202 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
205 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 203 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -220,11 +218,11 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
220 * Freelist management 218 * Freelist management
221 */ 219 */
222 220
223#define MGA_BUFFER_USED ~0 221#define MGA_BUFFER_USED (~0)
224#define MGA_BUFFER_FREE 0 222#define MGA_BUFFER_FREE 0
225 223
226#if MGA_FREELIST_DEBUG 224#if MGA_FREELIST_DEBUG
227static void mga_freelist_print(struct drm_device * dev) 225static void mga_freelist_print(struct drm_device *dev)
228{ 226{
229 drm_mga_private_t *dev_priv = dev->dev_private; 227 drm_mga_private_t *dev_priv = dev->dev_private;
230 drm_mga_freelist_t *entry; 228 drm_mga_freelist_t *entry;
@@ -245,7 +243,7 @@ static void mga_freelist_print(struct drm_device * dev)
245} 243}
246#endif 244#endif
247 245
248static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) 246static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
249{ 247{
250 struct drm_device_dma *dma = dev->dma; 248 struct drm_device_dma *dma = dev->dma;
251 struct drm_buf *buf; 249 struct drm_buf *buf;
@@ -288,7 +286,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
288 return 0; 286 return 0;
289} 287}
290 288
291static void mga_freelist_cleanup(struct drm_device * dev) 289static void mga_freelist_cleanup(struct drm_device *dev)
292{ 290{
293 drm_mga_private_t *dev_priv = dev->dev_private; 291 drm_mga_private_t *dev_priv = dev->dev_private;
294 drm_mga_freelist_t *entry; 292 drm_mga_freelist_t *entry;
@@ -308,7 +306,7 @@ static void mga_freelist_cleanup(struct drm_device * dev)
308#if 0 306#if 0
309/* FIXME: Still needed? 307/* FIXME: Still needed?
310 */ 308 */
311static void mga_freelist_reset(struct drm_device * dev) 309static void mga_freelist_reset(struct drm_device *dev)
312{ 310{
313 struct drm_device_dma *dma = dev->dma; 311 struct drm_device_dma *dma = dev->dma;
314 struct drm_buf *buf; 312 struct drm_buf *buf;
@@ -356,7 +354,7 @@ static struct drm_buf *mga_freelist_get(struct drm_device * dev)
356 return NULL; 354 return NULL;
357} 355}
358 356
359int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) 357int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
360{ 358{
361 drm_mga_private_t *dev_priv = dev->dev_private; 359 drm_mga_private_t *dev_priv = dev->dev_private;
362 drm_mga_buf_priv_t *buf_priv = buf->dev_private; 360 drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -391,7 +389,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
391 * DMA initialization, cleanup 389 * DMA initialization, cleanup
392 */ 390 */
393 391
394int mga_driver_load(struct drm_device * dev, unsigned long flags) 392int mga_driver_load(struct drm_device *dev, unsigned long flags)
395{ 393{
396 drm_mga_private_t *dev_priv; 394 drm_mga_private_t *dev_priv;
397 int ret; 395 int ret;
@@ -439,8 +437,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
439 * 437 *
440 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap 438 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
441 */ 439 */
442static int mga_do_agp_dma_bootstrap(struct drm_device * dev, 440static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
443 drm_mga_dma_bootstrap_t * dma_bs) 441 drm_mga_dma_bootstrap_t *dma_bs)
444{ 442{
445 drm_mga_private_t *const dev_priv = 443 drm_mga_private_t *const dev_priv =
446 (drm_mga_private_t *) dev->dev_private; 444 (drm_mga_private_t *) dev->dev_private;
@@ -481,11 +479,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
481 */ 479 */
482 480
483 if (dev_priv->chipset == MGA_CARD_TYPE_G200) { 481 if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
484 if (mode.mode & 0x02) { 482 if (mode.mode & 0x02)
485 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); 483 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
486 } else { 484 else
487 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); 485 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
488 }
489 } 486 }
490 487
491 /* Allocate and bind AGP memory. */ 488 /* Allocate and bind AGP memory. */
@@ -593,8 +590,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
593 return 0; 590 return 0;
594} 591}
595#else 592#else
596static int mga_do_agp_dma_bootstrap(struct drm_device * dev, 593static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
597 drm_mga_dma_bootstrap_t * dma_bs) 594 drm_mga_dma_bootstrap_t *dma_bs)
598{ 595{
599 return -EINVAL; 596 return -EINVAL;
600} 597}
@@ -614,8 +611,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
614 * 611 *
615 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap 612 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
616 */ 613 */
617static int mga_do_pci_dma_bootstrap(struct drm_device * dev, 614static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
618 drm_mga_dma_bootstrap_t * dma_bs) 615 drm_mga_dma_bootstrap_t *dma_bs)
619{ 616{
620 drm_mga_private_t *const dev_priv = 617 drm_mga_private_t *const dev_priv =
621 (drm_mga_private_t *) dev->dev_private; 618 (drm_mga_private_t *) dev->dev_private;
@@ -678,9 +675,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
678 req.size = dma_bs->secondary_bin_size; 675 req.size = dma_bs->secondary_bin_size;
679 676
680 err = drm_addbufs_pci(dev, &req); 677 err = drm_addbufs_pci(dev, &req);
681 if (!err) { 678 if (!err)
682 break; 679 break;
683 }
684 } 680 }
685 681
686 if (bin_count == 0) { 682 if (bin_count == 0) {
@@ -704,8 +700,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
704 return 0; 700 return 0;
705} 701}
706 702
707static int mga_do_dma_bootstrap(struct drm_device * dev, 703static int mga_do_dma_bootstrap(struct drm_device *dev,
708 drm_mga_dma_bootstrap_t * dma_bs) 704 drm_mga_dma_bootstrap_t *dma_bs)
709{ 705{
710 const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); 706 const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
711 int err; 707 int err;
@@ -737,17 +733,15 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
737 * carve off portions of it for internal uses. The remaining memory 733 * carve off portions of it for internal uses. The remaining memory
738 * is returned to user-mode to be used for AGP textures. 734 * is returned to user-mode to be used for AGP textures.
739 */ 735 */
740 if (is_agp) { 736 if (is_agp)
741 err = mga_do_agp_dma_bootstrap(dev, dma_bs); 737 err = mga_do_agp_dma_bootstrap(dev, dma_bs);
742 }
743 738
744 /* If we attempted to initialize the card for AGP DMA but failed, 739 /* If we attempted to initialize the card for AGP DMA but failed,
745 * clean-up any mess that may have been created. 740 * clean-up any mess that may have been created.
746 */ 741 */
747 742
748 if (err) { 743 if (err)
749 mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); 744 mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
750 }
751 745
 752 /* Not only do we want to try and initialize PCI cards for PCI DMA, 746 /* Not only do we want to try and initialize PCI cards for PCI DMA,
 753 * but we also try to initialize AGP cards that could not be 747 * but we also try to initialize AGP cards that could not be
@@ -757,9 +751,8 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
757 * AGP memory, etc. 751 * AGP memory, etc.
758 */ 752 */
759 753
760 if (!is_agp || err) { 754 if (!is_agp || err)
761 err = mga_do_pci_dma_bootstrap(dev, dma_bs); 755 err = mga_do_pci_dma_bootstrap(dev, dma_bs);
762 }
763 756
764 return err; 757 return err;
765} 758}
@@ -792,7 +785,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
792 return err; 785 return err;
793} 786}
794 787
795static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) 788static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
796{ 789{
797 drm_mga_private_t *dev_priv; 790 drm_mga_private_t *dev_priv;
798 int ret; 791 int ret;
@@ -800,11 +793,10 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
800 793
801 dev_priv = dev->dev_private; 794 dev_priv = dev->dev_private;
802 795
803 if (init->sgram) { 796 if (init->sgram)
804 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; 797 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
805 } else { 798 else
806 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; 799 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
807 }
808 dev_priv->maccess = init->maccess; 800 dev_priv->maccess = init->maccess;
809 801
810 dev_priv->fb_cpp = init->fb_cpp; 802 dev_priv->fb_cpp = init->fb_cpp;
@@ -975,9 +967,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
975 dev_priv->agp_handle = 0; 967 dev_priv->agp_handle = 0;
976 } 968 }
977 969
978 if ((dev->agp != NULL) && dev->agp->acquired) { 970 if ((dev->agp != NULL) && dev->agp->acquired)
979 err = drm_agp_release(dev); 971 err = drm_agp_release(dev);
980 }
981#endif 972#endif
982 } 973 }
983 974
@@ -998,9 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
998 memset(dev_priv->warp_pipe_phys, 0, 989 memset(dev_priv->warp_pipe_phys, 0,
999 sizeof(dev_priv->warp_pipe_phys)); 990 sizeof(dev_priv->warp_pipe_phys));
1000 991
1001 if (dev_priv->head != NULL) { 992 if (dev_priv->head != NULL)
1002 mga_freelist_cleanup(dev); 993 mga_freelist_cleanup(dev);
1003 }
1004 } 994 }
1005 995
1006 return err; 996 return err;
@@ -1017,9 +1007,8 @@ int mga_dma_init(struct drm_device *dev, void *data,
1017 switch (init->func) { 1007 switch (init->func) {
1018 case MGA_INIT_DMA: 1008 case MGA_INIT_DMA:
1019 err = mga_do_init_dma(dev, init); 1009 err = mga_do_init_dma(dev, init);
1020 if (err) { 1010 if (err)
1021 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); 1011 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
1022 }
1023 return err; 1012 return err;
1024 case MGA_CLEANUP_DMA: 1013 case MGA_CLEANUP_DMA:
1025 return mga_do_cleanup_dma(dev, FULL_CLEANUP); 1014 return mga_do_cleanup_dma(dev, FULL_CLEANUP);
@@ -1047,9 +1036,8 @@ int mga_dma_flush(struct drm_device *dev, void *data,
1047 1036
1048 WRAP_WAIT_WITH_RETURN(dev_priv); 1037 WRAP_WAIT_WITH_RETURN(dev_priv);
1049 1038
1050 if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { 1039 if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
1051 mga_do_dma_flush(dev_priv); 1040 mga_do_dma_flush(dev_priv);
1052 }
1053 1041
1054 if (lock->flags & _DRM_LOCK_QUIESCENT) { 1042 if (lock->flags & _DRM_LOCK_QUIESCENT) {
1055#if MGA_DMA_DEBUG 1043#if MGA_DMA_DEBUG
@@ -1079,8 +1067,8 @@ int mga_dma_reset(struct drm_device *dev, void *data,
1079 * DMA buffer management 1067 * DMA buffer management
1080 */ 1068 */
1081 1069
1082static int mga_dma_get_buffers(struct drm_device * dev, 1070static int mga_dma_get_buffers(struct drm_device *dev,
1083 struct drm_file *file_priv, struct drm_dma * d) 1071 struct drm_file *file_priv, struct drm_dma *d)
1084{ 1072{
1085 struct drm_buf *buf; 1073 struct drm_buf *buf;
1086 int i; 1074 int i;
@@ -1134,9 +1122,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
1134 1122
1135 d->granted_count = 0; 1123 d->granted_count = 0;
1136 1124
1137 if (d->request_count) { 1125 if (d->request_count)
1138 ret = mga_dma_get_buffers(dev, file_priv, d); 1126 ret = mga_dma_get_buffers(dev, file_priv, d);
1139 }
1140 1127
1141 return ret; 1128 return ret;
1142} 1129}
@@ -1144,7 +1131,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
1144/** 1131/**
1145 * Called just before the module is unloaded. 1132 * Called just before the module is unloaded.
1146 */ 1133 */
1147int mga_driver_unload(struct drm_device * dev) 1134int mga_driver_unload(struct drm_device *dev)
1148{ 1135{
1149 kfree(dev->dev_private); 1136 kfree(dev->dev_private);
1150 dev->dev_private = NULL; 1137 dev->dev_private = NULL;
@@ -1155,12 +1142,12 @@ int mga_driver_unload(struct drm_device * dev)
1155/** 1142/**
1156 * Called when the last opener of the device is closed. 1143 * Called when the last opener of the device is closed.
1157 */ 1144 */
1158void mga_driver_lastclose(struct drm_device * dev) 1145void mga_driver_lastclose(struct drm_device *dev)
1159{ 1146{
1160 mga_do_cleanup_dma(dev, FULL_CLEANUP); 1147 mga_do_cleanup_dma(dev, FULL_CLEANUP);
1161} 1148}
1162 1149
1163int mga_driver_dma_quiescent(struct drm_device * dev) 1150int mga_driver_dma_quiescent(struct drm_device *dev)
1164{ 1151{
1165 drm_mga_private_t *dev_priv = dev->dev_private; 1152 drm_mga_private_t *dev_priv = dev->dev_private;
1166 return mga_do_wait_for_idle(dev_priv); 1153 return mga_do_wait_for_idle(dev_priv);
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index ddfe16197b59..26d0d8ced80d 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -36,7 +36,7 @@
36 36
37#include "drm_pciids.h" 37#include "drm_pciids.h"
38 38
39static int mga_driver_device_is_agp(struct drm_device * dev); 39static int mga_driver_device_is_agp(struct drm_device *dev);
40 40
41static struct pci_device_id pciidlist[] = { 41static struct pci_device_id pciidlist[] = {
42 mga_PCI_IDS 42 mga_PCI_IDS
@@ -119,7 +119,7 @@ MODULE_LICENSE("GPL and additional rights");
119 * \returns 119 * \returns
120 * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. 120 * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
121 */ 121 */
122static int mga_driver_device_is_agp(struct drm_device * dev) 122static int mga_driver_device_is_agp(struct drm_device *dev)
123{ 123{
124 const struct pci_dev *const pdev = dev->pdev; 124 const struct pci_dev *const pdev = dev->pdev;
125 125
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index be6c6b9b0e89..1084fa4d261b 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -164,59 +164,59 @@ extern int mga_dma_reset(struct drm_device *dev, void *data,
164extern int mga_dma_buffers(struct drm_device *dev, void *data, 164extern int mga_dma_buffers(struct drm_device *dev, void *data,
165 struct drm_file *file_priv); 165 struct drm_file *file_priv);
166extern int mga_driver_load(struct drm_device *dev, unsigned long flags); 166extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
167extern int mga_driver_unload(struct drm_device * dev); 167extern int mga_driver_unload(struct drm_device *dev);
168extern void mga_driver_lastclose(struct drm_device * dev); 168extern void mga_driver_lastclose(struct drm_device *dev);
169extern int mga_driver_dma_quiescent(struct drm_device * dev); 169extern int mga_driver_dma_quiescent(struct drm_device *dev);
170 170
171extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); 171extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv);
172 172
173extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); 173extern void mga_do_dma_flush(drm_mga_private_t *dev_priv);
174extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); 174extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv);
175extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); 175extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv);
176 176
177extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); 177extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf);
178 178
179 /* mga_warp.c */ 179 /* mga_warp.c */
180extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); 180extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
181extern int mga_warp_init(drm_mga_private_t * dev_priv); 181extern int mga_warp_init(drm_mga_private_t *dev_priv);
182 182
183 /* mga_irq.c */ 183 /* mga_irq.c */
184extern int mga_enable_vblank(struct drm_device *dev, int crtc); 184extern int mga_enable_vblank(struct drm_device *dev, int crtc);
185extern void mga_disable_vblank(struct drm_device *dev, int crtc); 185extern void mga_disable_vblank(struct drm_device *dev, int crtc);
186extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); 186extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
187extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); 187extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
188extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 188extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
189extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); 189extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
190extern void mga_driver_irq_preinstall(struct drm_device * dev); 190extern void mga_driver_irq_preinstall(struct drm_device *dev);
191extern int mga_driver_irq_postinstall(struct drm_device *dev); 191extern int mga_driver_irq_postinstall(struct drm_device *dev);
192extern void mga_driver_irq_uninstall(struct drm_device * dev); 192extern void mga_driver_irq_uninstall(struct drm_device *dev);
193extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 193extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
194 unsigned long arg); 194 unsigned long arg);
195 195
196#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() 196#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
197 197
198#if defined(__linux__) && defined(__alpha__) 198#if defined(__linux__) && defined(__alpha__)
199#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle)) 199#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
200#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg) 200#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
201 201
202#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg ) 202#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
203#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg ) 203#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
204 204
205#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg))) 205#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
206#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg))) 206#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
207#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0) 207#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
208#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0) 208#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
209 209
210static inline u32 _MGA_READ(u32 * addr) 210static inline u32 _MGA_READ(u32 *addr)
211{ 211{
212 DRM_MEMORYBARRIER(); 212 DRM_MEMORYBARRIER();
213 return *(volatile u32 *)addr; 213 return *(volatile u32 *)addr;
214} 214}
215#else 215#else
216#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg)) 216#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
217#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg)) 217#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
218#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val)) 218#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
219#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val)) 219#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
220#endif 220#endif
221 221
222#define DWGREG0 0x1c00 222#define DWGREG0 0x1c00
@@ -233,40 +233,39 @@ static inline u32 _MGA_READ(u32 * addr)
 233 * Helper macros... 233 * Helper macros...
234 */ 234 */
235 235
236#define MGA_EMIT_STATE( dev_priv, dirty ) \ 236#define MGA_EMIT_STATE(dev_priv, dirty) \
237do { \ 237do { \
238 if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \ 238 if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) { \
239 if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \ 239 if (dev_priv->chipset >= MGA_CARD_TYPE_G400) \
240 mga_g400_emit_state( dev_priv ); \ 240 mga_g400_emit_state(dev_priv); \
241 } else { \ 241 else \
242 mga_g200_emit_state( dev_priv ); \ 242 mga_g200_emit_state(dev_priv); \
243 } \
244 } \ 243 } \
245} while (0) 244} while (0)
246 245
247#define WRAP_TEST_WITH_RETURN( dev_priv ) \ 246#define WRAP_TEST_WITH_RETURN(dev_priv) \
248do { \ 247do { \
249 if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ 248 if (test_bit(0, &dev_priv->prim.wrapped)) { \
250 if ( mga_is_idle( dev_priv ) ) { \ 249 if (mga_is_idle(dev_priv)) { \
251 mga_do_dma_wrap_end( dev_priv ); \ 250 mga_do_dma_wrap_end(dev_priv); \
252 } else if ( dev_priv->prim.space < \ 251 } else if (dev_priv->prim.space < \
253 dev_priv->prim.high_mark ) { \ 252 dev_priv->prim.high_mark) { \
254 if ( MGA_DMA_DEBUG ) \ 253 if (MGA_DMA_DEBUG) \
255 DRM_INFO( "wrap...\n"); \ 254 DRM_INFO("wrap...\n"); \
256 return -EBUSY; \ 255 return -EBUSY; \
257 } \ 256 } \
258 } \ 257 } \
259} while (0) 258} while (0)
260 259
261#define WRAP_WAIT_WITH_RETURN( dev_priv ) \ 260#define WRAP_WAIT_WITH_RETURN(dev_priv) \
262do { \ 261do { \
263 if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ 262 if (test_bit(0, &dev_priv->prim.wrapped)) { \
264 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ 263 if (mga_do_wait_for_idle(dev_priv) < 0) { \
265 if ( MGA_DMA_DEBUG ) \ 264 if (MGA_DMA_DEBUG) \
266 DRM_INFO( "wrap...\n"); \ 265 DRM_INFO("wrap...\n"); \
267 return -EBUSY; \ 266 return -EBUSY; \
268 } \ 267 } \
269 mga_do_dma_wrap_end( dev_priv ); \ 268 mga_do_dma_wrap_end(dev_priv); \
270 } \ 269 } \
271} while (0) 270} while (0)
272 271
@@ -280,12 +279,12 @@ do { \
280 279
281#define DMA_BLOCK_SIZE (5 * sizeof(u32)) 280#define DMA_BLOCK_SIZE (5 * sizeof(u32))
282 281
283#define BEGIN_DMA( n ) \ 282#define BEGIN_DMA(n) \
284do { \ 283do { \
285 if ( MGA_VERBOSE ) { \ 284 if (MGA_VERBOSE) { \
286 DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \ 285 DRM_INFO("BEGIN_DMA(%d)\n", (n)); \
287 DRM_INFO( " space=0x%x req=0x%Zx\n", \ 286 DRM_INFO(" space=0x%x req=0x%Zx\n", \
288 dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \ 287 dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \
289 } \ 288 } \
290 prim = dev_priv->prim.start; \ 289 prim = dev_priv->prim.start; \
291 write = dev_priv->prim.tail; \ 290 write = dev_priv->prim.tail; \
@@ -293,9 +292,9 @@ do { \
293 292
294#define BEGIN_DMA_WRAP() \ 293#define BEGIN_DMA_WRAP() \
295do { \ 294do { \
296 if ( MGA_VERBOSE ) { \ 295 if (MGA_VERBOSE) { \
297 DRM_INFO( "BEGIN_DMA()\n" ); \ 296 DRM_INFO("BEGIN_DMA()\n"); \
298 DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ 297 DRM_INFO(" space=0x%x\n", dev_priv->prim.space); \
299 } \ 298 } \
300 prim = dev_priv->prim.start; \ 299 prim = dev_priv->prim.start; \
301 write = dev_priv->prim.tail; \ 300 write = dev_priv->prim.tail; \
@@ -304,72 +303,68 @@ do { \
304#define ADVANCE_DMA() \ 303#define ADVANCE_DMA() \
305do { \ 304do { \
306 dev_priv->prim.tail = write; \ 305 dev_priv->prim.tail = write; \
307 if ( MGA_VERBOSE ) { \ 306 if (MGA_VERBOSE) \
308 DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \ 307 DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
309 write, dev_priv->prim.space ); \ 308 write, dev_priv->prim.space); \
310 } \
311} while (0) 309} while (0)
312 310
313#define FLUSH_DMA() \ 311#define FLUSH_DMA() \
314do { \ 312do { \
315 if ( 0 ) { \ 313 if (0) { \
316 DRM_INFO( "\n" ); \ 314 DRM_INFO("\n"); \
317 DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ 315 DRM_INFO(" tail=0x%06x head=0x%06lx\n", \
318 dev_priv->prim.tail, \ 316 dev_priv->prim.tail, \
319 (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \ 317 (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
320 dev_priv->primary->offset)); \ 318 dev_priv->primary->offset)); \
321 } \ 319 } \
322 if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \ 320 if (!test_bit(0, &dev_priv->prim.wrapped)) { \
323 if ( dev_priv->prim.space < \ 321 if (dev_priv->prim.space < dev_priv->prim.high_mark) \
324 dev_priv->prim.high_mark ) { \ 322 mga_do_dma_wrap_start(dev_priv); \
325 mga_do_dma_wrap_start( dev_priv ); \ 323 else \
326 } else { \ 324 mga_do_dma_flush(dev_priv); \
327 mga_do_dma_flush( dev_priv ); \
328 } \
329 } \ 325 } \
330} while (0) 326} while (0)
331 327
332/* Never use this, always use DMA_BLOCK(...) for primary DMA output. 328/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
333 */ 329 */
334#define DMA_WRITE( offset, val ) \ 330#define DMA_WRITE(offset, val) \
335do { \ 331do { \
336 if ( MGA_VERBOSE ) { \ 332 if (MGA_VERBOSE) \
337 DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ 333 DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
338 (u32)(val), write + (offset) * sizeof(u32) ); \ 334 (u32)(val), write + (offset) * sizeof(u32)); \
339 } \
340 *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ 335 *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
341} while (0) 336} while (0)
342 337
343#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \ 338#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3) \
344do { \ 339do { \
345 DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \ 340 DMA_WRITE(0, ((DMAREG(reg0) << 0) | \
346 (DMAREG( reg1 ) << 8) | \ 341 (DMAREG(reg1) << 8) | \
347 (DMAREG( reg2 ) << 16) | \ 342 (DMAREG(reg2) << 16) | \
348 (DMAREG( reg3 ) << 24)) ); \ 343 (DMAREG(reg3) << 24))); \
349 DMA_WRITE( 1, val0 ); \ 344 DMA_WRITE(1, val0); \
350 DMA_WRITE( 2, val1 ); \ 345 DMA_WRITE(2, val1); \
351 DMA_WRITE( 3, val2 ); \ 346 DMA_WRITE(3, val2); \
352 DMA_WRITE( 4, val3 ); \ 347 DMA_WRITE(4, val3); \
353 write += DMA_BLOCK_SIZE; \ 348 write += DMA_BLOCK_SIZE; \
354} while (0) 349} while (0)
355 350
356/* Buffer aging via primary DMA stream head pointer. 351/* Buffer aging via primary DMA stream head pointer.
357 */ 352 */
358 353
359#define SET_AGE( age, h, w ) \ 354#define SET_AGE(age, h, w) \
360do { \ 355do { \
361 (age)->head = h; \ 356 (age)->head = h; \
362 (age)->wrap = w; \ 357 (age)->wrap = w; \
363} while (0) 358} while (0)
364 359
365#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \ 360#define TEST_AGE(age, h, w) ((age)->wrap < w || \
366 ( (age)->wrap == w && \ 361 ((age)->wrap == w && \
367 (age)->head < h ) ) 362 (age)->head < h))
368 363
369#define AGE_BUFFER( buf_priv ) \ 364#define AGE_BUFFER(buf_priv) \
370do { \ 365do { \
371 drm_mga_freelist_t *entry = (buf_priv)->list_entry; \ 366 drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
372 if ( (buf_priv)->dispatched ) { \ 367 if ((buf_priv)->dispatched) { \
373 entry->age.head = (dev_priv->prim.tail + \ 368 entry->age.head = (dev_priv->prim.tail + \
374 dev_priv->primary->offset); \ 369 dev_priv->primary->offset); \
375 entry->age.wrap = dev_priv->sarea_priv->last_wrap; \ 370 entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
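
The DMA_BLOCK macro cleaned up above emits the fixed 5-dword primary-DMA format: one dword packing four register selectors (one byte each, produced by DMAREG), followed by the four values. A minimal sketch of that layout with made-up register numbers:

#include <stdint.h>
#include <stdio.h>

/* Pack one 5-dword block: selector dword first, then the four values. */
static void dma_block(uint32_t out[5],
		      uint8_t r0, uint32_t v0, uint8_t r1, uint32_t v1,
		      uint8_t r2, uint32_t v2, uint8_t r3, uint32_t v3)
{
	out[0] = (uint32_t)r0 | ((uint32_t)r1 << 8) |
		 ((uint32_t)r2 << 16) | ((uint32_t)r3 << 24);
	out[1] = v0;
	out[2] = v1;
	out[3] = v2;
	out[4] = v3;
}

int main(void)
{
	uint32_t block[5];

	dma_block(block, 0x10, 0xffffffff, 0x11, 0x0, 0x12, 0x1234, 0x13, 0x0);
	printf("selector dword: 0x%08x\n", block[0]);	/* 0x13121110 */
	return 0;
}
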
@@ -681,7 +676,7 @@ do { \
681 676
682/* Simple idle test. 677/* Simple idle test.
683 */ 678 */
684static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv) 679static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv)
685{ 680{
686 u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; 681 u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
687 return (status == MGA_ENDPRDMASTS); 682 return (status == MGA_ENDPRDMASTS);
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index daa6041a483a..2581202297e4 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -76,9 +76,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
76 /* In addition to clearing the interrupt-pending bit, we 76 /* In addition to clearing the interrupt-pending bit, we
77 * have to write to MGA_PRIMEND to re-start the DMA operation. 77 * have to write to MGA_PRIMEND to re-start the DMA operation.
78 */ 78 */
79 if ((prim_start & ~0x03) != (prim_end & ~0x03)) { 79 if ((prim_start & ~0x03) != (prim_end & ~0x03))
80 MGA_WRITE(MGA_PRIMEND, prim_end); 80 MGA_WRITE(MGA_PRIMEND, prim_end);
81 }
82 81
83 atomic_inc(&dev_priv->last_fence_retired); 82 atomic_inc(&dev_priv->last_fence_retired);
84 DRM_WAKEUP(&dev_priv->fence_queue); 83 DRM_WAKEUP(&dev_priv->fence_queue);
@@ -120,7 +119,7 @@ void mga_disable_vblank(struct drm_device *dev, int crtc)
120 /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ 119 /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
121} 120}
122 121
123int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) 122int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
124{ 123{
125 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 124 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
126 unsigned int cur_fence; 125 unsigned int cur_fence;
@@ -139,7 +138,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
139 return ret; 138 return ret;
140} 139}
141 140
142void mga_driver_irq_preinstall(struct drm_device * dev) 141void mga_driver_irq_preinstall(struct drm_device *dev)
143{ 142{
144 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 143 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
145 144
@@ -162,7 +161,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
162 return 0; 161 return 0;
163} 162}
164 163
165void mga_driver_irq_uninstall(struct drm_device * dev) 164void mga_driver_irq_uninstall(struct drm_device *dev)
166{ 165{
167 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 166 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
168 if (!dev_priv) 167 if (!dev_priv)
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index a53b848e0f17..fff82045c427 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -41,8 +41,8 @@
41 * DMA hardware state programming functions 41 * DMA hardware state programming functions
42 */ 42 */
43 43
44static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, 44static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
45 struct drm_clip_rect * box) 45 struct drm_clip_rect *box)
46{ 46{
47 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 47 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
48 drm_mga_context_regs_t *ctx = &sarea_priv->context_state; 48 drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -66,7 +66,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
66 ADVANCE_DMA(); 66 ADVANCE_DMA();
67} 67}
68 68
69static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv) 69static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
70{ 70{
71 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 71 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
72 drm_mga_context_regs_t *ctx = &sarea_priv->context_state; 72 drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -89,7 +89,7 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
89 ADVANCE_DMA(); 89 ADVANCE_DMA();
90} 90}
91 91
92static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv) 92static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
93{ 93{
94 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 94 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
95 drm_mga_context_regs_t *ctx = &sarea_priv->context_state; 95 drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -116,7 +116,7 @@ static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
116 ADVANCE_DMA(); 116 ADVANCE_DMA();
117} 117}
118 118
119static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv) 119static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
120{ 120{
121 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 121 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
122 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; 122 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -144,7 +144,7 @@ static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
144 ADVANCE_DMA(); 144 ADVANCE_DMA();
145} 145}
146 146
147static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv) 147static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
148{ 148{
149 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 149 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
150 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; 150 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -184,7 +184,7 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
184 ADVANCE_DMA(); 184 ADVANCE_DMA();
185} 185}
186 186
187static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv) 187static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
188{ 188{
189 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 189 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
190 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1]; 190 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -223,7 +223,7 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
223 ADVANCE_DMA(); 223 ADVANCE_DMA();
224} 224}
225 225
226static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv) 226static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
227{ 227{
228 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 228 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
229 unsigned int pipe = sarea_priv->warp_pipe; 229 unsigned int pipe = sarea_priv->warp_pipe;
@@ -250,7 +250,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
250 ADVANCE_DMA(); 250 ADVANCE_DMA();
251} 251}
252 252
253static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) 253static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
254{ 254{
255 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 255 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
256 unsigned int pipe = sarea_priv->warp_pipe; 256 unsigned int pipe = sarea_priv->warp_pipe;
@@ -327,7 +327,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
327 ADVANCE_DMA(); 327 ADVANCE_DMA();
328} 328}
329 329
330static void mga_g200_emit_state(drm_mga_private_t * dev_priv) 330static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
331{ 331{
332 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 332 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
333 unsigned int dirty = sarea_priv->dirty; 333 unsigned int dirty = sarea_priv->dirty;
@@ -348,7 +348,7 @@ static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
348 } 348 }
349} 349}
350 350
351static void mga_g400_emit_state(drm_mga_private_t * dev_priv) 351static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
352{ 352{
353 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 353 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
354 unsigned int dirty = sarea_priv->dirty; 354 unsigned int dirty = sarea_priv->dirty;
@@ -381,7 +381,7 @@ static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
381 381
382/* Disallow all write destinations except the front and backbuffer. 382/* Disallow all write destinations except the front and backbuffer.
383 */ 383 */
384static int mga_verify_context(drm_mga_private_t * dev_priv) 384static int mga_verify_context(drm_mga_private_t *dev_priv)
385{ 385{
386 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 386 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
387 drm_mga_context_regs_t *ctx = &sarea_priv->context_state; 387 drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -400,7 +400,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
400 400
401/* Disallow texture reads from PCI space. 401/* Disallow texture reads from PCI space.
402 */ 402 */
403static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) 403static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
404{ 404{
405 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 405 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
406 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit]; 406 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
@@ -417,7 +417,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
417 return 0; 417 return 0;
418} 418}
419 419
420static int mga_verify_state(drm_mga_private_t * dev_priv) 420static int mga_verify_state(drm_mga_private_t *dev_priv)
421{ 421{
422 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 422 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
423 unsigned int dirty = sarea_priv->dirty; 423 unsigned int dirty = sarea_priv->dirty;
@@ -446,7 +446,7 @@ static int mga_verify_state(drm_mga_private_t * dev_priv)
446 return (ret == 0); 446 return (ret == 0);
447} 447}
448 448
449static int mga_verify_iload(drm_mga_private_t * dev_priv, 449static int mga_verify_iload(drm_mga_private_t *dev_priv,
450 unsigned int dstorg, unsigned int length) 450 unsigned int dstorg, unsigned int length)
451{ 451{
452 if (dstorg < dev_priv->texture_offset || 452 if (dstorg < dev_priv->texture_offset ||
@@ -465,7 +465,7 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
465 return 0; 465 return 0;
466} 466}
467 467
468static int mga_verify_blit(drm_mga_private_t * dev_priv, 468static int mga_verify_blit(drm_mga_private_t *dev_priv,
469 unsigned int srcorg, unsigned int dstorg) 469 unsigned int srcorg, unsigned int dstorg)
470{ 470{
471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || 471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
@@ -480,7 +480,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
480 * 480 *
481 */ 481 */
482 482
483static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear) 483static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
484{ 484{
485 drm_mga_private_t *dev_priv = dev->dev_private; 485 drm_mga_private_t *dev_priv = dev->dev_private;
486 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 486 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -568,7 +568,7 @@ static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * cl
568 FLUSH_DMA(); 568 FLUSH_DMA();
569} 569}
570 570
571static void mga_dma_dispatch_swap(struct drm_device * dev) 571static void mga_dma_dispatch_swap(struct drm_device *dev)
572{ 572{
573 drm_mga_private_t *dev_priv = dev->dev_private; 573 drm_mga_private_t *dev_priv = dev->dev_private;
574 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 574 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -622,7 +622,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
622 DRM_DEBUG("... done.\n"); 622 DRM_DEBUG("... done.\n");
623} 623}
624 624
625static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) 625static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
626{ 626{
627 drm_mga_private_t *dev_priv = dev->dev_private; 627 drm_mga_private_t *dev_priv = dev->dev_private;
628 drm_mga_buf_priv_t *buf_priv = buf->dev_private; 628 drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -669,7 +669,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
669 FLUSH_DMA(); 669 FLUSH_DMA();
670} 670}
671 671
672static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf, 672static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
673 unsigned int start, unsigned int end) 673 unsigned int start, unsigned int end)
674{ 674{
675 drm_mga_private_t *dev_priv = dev->dev_private; 675 drm_mga_private_t *dev_priv = dev->dev_private;
@@ -718,7 +718,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
718/* This copies a 64 byte aligned agp region to the framebuffer with a 718/* This copies a 64 byte aligned agp region to the framebuffer with a
719 * standard blit, the ioctl needs to do checking. 719 * standard blit, the ioctl needs to do checking.
720 */ 720 */
721static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, 721static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
722 unsigned int dstorg, unsigned int length) 722 unsigned int dstorg, unsigned int length)
723{ 723{
724 drm_mga_private_t *dev_priv = dev->dev_private; 724 drm_mga_private_t *dev_priv = dev->dev_private;
@@ -766,7 +766,7 @@ static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf
766 FLUSH_DMA(); 766 FLUSH_DMA();
767} 767}
768 768
769static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) 769static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
770{ 770{
771 drm_mga_private_t *dev_priv = dev->dev_private; 771 drm_mga_private_t *dev_priv = dev->dev_private;
772 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 772 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -801,9 +801,8 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit
801 int w = pbox[i].x2 - pbox[i].x1 - 1; 801 int w = pbox[i].x2 - pbox[i].x1 - 1;
802 int start; 802 int start;
803 803
804 if (blit->ydir == -1) { 804 if (blit->ydir == -1)
805 srcy = blit->height - srcy - 1; 805 srcy = blit->height - srcy - 1;
806 }
807 806
808 start = srcy * blit->src_pitch + srcx; 807 start = srcy * blit->src_pitch + srcx;
809 808
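The blit path above contains the one piece of arithmetic worth spelling out: with ydir == -1 the copy runs bottom-up, so the source row is mirrored within the blit height before the start offset is formed as an ordinary row-major address. A hedged, stand-alone sketch of that calculation (the variable names follow the diff; everything else is illustrative):

#include <stdio.h>

/* Row-major start offset for one clip box, mirroring the diff's logic. */
static unsigned int blit_start(int ydir, unsigned int height,
                               unsigned int src_pitch,
                               unsigned int srcx, unsigned int srcy)
{
        if (ydir == -1)                 /* bottom-up blit: mirror the source row */
                srcy = height - srcy - 1;

        return srcy * src_pitch + srcx; /* pixels from the source origin */
}

int main(void)
{
        /* 64x64 source, pitch 64: top-down vs. bottom-up start for row 10 */
        printf("%u\n", blit_start(1, 64, 64, 8, 10));   /* 10*64 + 8 = 648  */
        printf("%u\n", blit_start(-1, 64, 64, 8, 10));  /* 53*64 + 8 = 3400 */
        return 0;
}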
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 9aad4847afdf..f172bd5c257f 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -46,7 +46,7 @@ MODULE_FIRMWARE(FIRMWARE_G400);
46 46
47#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN) 47#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN)
48 48
49int mga_warp_install_microcode(drm_mga_private_t * dev_priv) 49int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
50{ 50{
51 unsigned char *vcbase = dev_priv->warp->handle; 51 unsigned char *vcbase = dev_priv->warp->handle;
52 unsigned long pcbase = dev_priv->warp->offset; 52 unsigned long pcbase = dev_priv->warp->offset;
@@ -133,7 +133,7 @@ out:
133 133
134#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) 134#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
135 135
136int mga_warp_init(drm_mga_private_t * dev_priv) 136int mga_warp_init(drm_mga_private_t *dev_priv)
137{ 137{
138 u32 wmisc; 138 u32 wmisc;
139 139
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index b6f5239c2efb..d2d28048efb2 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -41,4 +41,13 @@ config DRM_I2C_CH7006
41 41
42 This driver is currently only useful if you're also using 42 This driver is currently only useful if you're also using
43 the nouveau driver. 43 the nouveau driver.
44
45config DRM_I2C_SIL164
46 tristate "Silicon Image sil164 TMDS transmitter"
47 default m if DRM_NOUVEAU
48 help
49 Support for sil164 and similar single-link (or dual-link
50 when used in pairs) TMDS transmitters, used in some nVidia
51 video cards.
52
44endmenu 53endmenu
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index e671d0e74d4c..570e190710bd 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -44,7 +44,7 @@
44 44
45MODULE_FIRMWARE(FIRMWARE_NAME); 45MODULE_FIRMWARE(FIRMWARE_NAME);
46 46
47static int R128_READ_PLL(struct drm_device * dev, int addr) 47static int R128_READ_PLL(struct drm_device *dev, int addr)
48{ 48{
49 drm_r128_private_t *dev_priv = dev->dev_private; 49 drm_r128_private_t *dev_priv = dev->dev_private;
50 50
@@ -53,7 +53,7 @@ static int R128_READ_PLL(struct drm_device * dev, int addr)
53} 53}
54 54
55#if R128_FIFO_DEBUG 55#if R128_FIFO_DEBUG
56static void r128_status(drm_r128_private_t * dev_priv) 56static void r128_status(drm_r128_private_t *dev_priv)
57{ 57{
58 printk("GUI_STAT = 0x%08x\n", 58 printk("GUI_STAT = 0x%08x\n",
59 (unsigned int)R128_READ(R128_GUI_STAT)); 59 (unsigned int)R128_READ(R128_GUI_STAT));
@@ -74,7 +74,7 @@ static void r128_status(drm_r128_private_t * dev_priv)
74 * Engine, FIFO control 74 * Engine, FIFO control
75 */ 75 */
76 76
77static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) 77static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
78{ 78{
79 u32 tmp; 79 u32 tmp;
80 int i; 80 int i;
@@ -83,9 +83,8 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
83 R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); 83 R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
84 84
85 for (i = 0; i < dev_priv->usec_timeout; i++) { 85 for (i = 0; i < dev_priv->usec_timeout; i++) {
86 if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) { 86 if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
87 return 0; 87 return 0;
88 }
89 DRM_UDELAY(1); 88 DRM_UDELAY(1);
90 } 89 }
91 90
@@ -95,7 +94,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
95 return -EBUSY; 94 return -EBUSY;
96} 95}
97 96
98static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) 97static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
99{ 98{
100 int i; 99 int i;
101 100
@@ -112,7 +111,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
112 return -EBUSY; 111 return -EBUSY;
113} 112}
114 113
115static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) 114static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
116{ 115{
117 int i, ret; 116 int i, ret;
118 117
@@ -189,7 +188,7 @@ out_release:
189 * prior to a wait for idle, as it informs the engine that the command 188 * prior to a wait for idle, as it informs the engine that the command
190 * stream is ending. 189 * stream is ending.
191 */ 190 */
192static void r128_do_cce_flush(drm_r128_private_t * dev_priv) 191static void r128_do_cce_flush(drm_r128_private_t *dev_priv)
193{ 192{
194 u32 tmp; 193 u32 tmp;
195 194
@@ -199,7 +198,7 @@ static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
199 198
200/* Wait for the CCE to go idle. 199/* Wait for the CCE to go idle.
201 */ 200 */
202int r128_do_cce_idle(drm_r128_private_t * dev_priv) 201int r128_do_cce_idle(drm_r128_private_t *dev_priv)
203{ 202{
204 int i; 203 int i;
205 204
@@ -225,7 +224,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
225 224
226/* Start the Concurrent Command Engine. 225/* Start the Concurrent Command Engine.
227 */ 226 */
228static void r128_do_cce_start(drm_r128_private_t * dev_priv) 227static void r128_do_cce_start(drm_r128_private_t *dev_priv)
229{ 228{
230 r128_do_wait_for_idle(dev_priv); 229 r128_do_wait_for_idle(dev_priv);
231 230
@@ -242,7 +241,7 @@ static void r128_do_cce_start(drm_r128_private_t * dev_priv)
242 * commands, so you must wait for the CCE command stream to complete 241 * commands, so you must wait for the CCE command stream to complete
243 * before calling this routine. 242 * before calling this routine.
244 */ 243 */
245static void r128_do_cce_reset(drm_r128_private_t * dev_priv) 244static void r128_do_cce_reset(drm_r128_private_t *dev_priv)
246{ 245{
247 R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); 246 R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
248 R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); 247 R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
@@ -253,7 +252,7 @@ static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
253 * commands, so you must flush the command stream and wait for the CCE 252 * commands, so you must flush the command stream and wait for the CCE
254 * to go idle before calling this routine. 253 * to go idle before calling this routine.
255 */ 254 */
256static void r128_do_cce_stop(drm_r128_private_t * dev_priv) 255static void r128_do_cce_stop(drm_r128_private_t *dev_priv)
257{ 256{
258 R128_WRITE(R128_PM4_MICRO_CNTL, 0); 257 R128_WRITE(R128_PM4_MICRO_CNTL, 0);
259 R128_WRITE(R128_PM4_BUFFER_CNTL, 258 R128_WRITE(R128_PM4_BUFFER_CNTL,
@@ -264,7 +263,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
264 263
265/* Reset the engine. This will stop the CCE if it is running. 264/* Reset the engine. This will stop the CCE if it is running.
266 */ 265 */
267static int r128_do_engine_reset(struct drm_device * dev) 266static int r128_do_engine_reset(struct drm_device *dev)
268{ 267{
269 drm_r128_private_t *dev_priv = dev->dev_private; 268 drm_r128_private_t *dev_priv = dev->dev_private;
270 u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; 269 u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@@ -301,8 +300,8 @@ static int r128_do_engine_reset(struct drm_device * dev)
301 return 0; 300 return 0;
302} 301}
303 302
304static void r128_cce_init_ring_buffer(struct drm_device * dev, 303static void r128_cce_init_ring_buffer(struct drm_device *dev,
305 drm_r128_private_t * dev_priv) 304 drm_r128_private_t *dev_priv)
306{ 305{
307 u32 ring_start; 306 u32 ring_start;
308 u32 tmp; 307 u32 tmp;
@@ -340,7 +339,7 @@ static void r128_cce_init_ring_buffer(struct drm_device * dev,
340 R128_WRITE(R128_BUS_CNTL, tmp); 339 R128_WRITE(R128_BUS_CNTL, tmp);
341} 340}
342 341
343static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) 342static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
344{ 343{
345 drm_r128_private_t *dev_priv; 344 drm_r128_private_t *dev_priv;
346 int rc; 345 int rc;
@@ -588,7 +587,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
588 return rc; 587 return rc;
589} 588}
590 589
591int r128_do_cleanup_cce(struct drm_device * dev) 590int r128_do_cleanup_cce(struct drm_device *dev)
592{ 591{
593 592
594 /* Make sure interrupts are disabled here because the uninstall ioctl 593 /* Make sure interrupts are disabled here because the uninstall ioctl
@@ -682,9 +681,8 @@ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv
682 /* Flush any pending CCE commands. This ensures any outstanding 681 /* Flush any pending CCE commands. This ensures any outstanding
683 * commands are executed by the engine before we turn it off. 682 * commands are executed by the engine before we turn it off.
684 */ 683 */
685 if (stop->flush) { 684 if (stop->flush)
686 r128_do_cce_flush(dev_priv); 685 r128_do_cce_flush(dev_priv);
687 }
688 686
689 /* If we fail to make the engine go idle, we return an error 687 /* If we fail to make the engine go idle, we return an error
690 * code so that the DRM ioctl wrapper can try again. 688 * code so that the DRM ioctl wrapper can try again.
@@ -735,9 +733,8 @@ int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv
735 733
736 DEV_INIT_TEST_WITH_RETURN(dev_priv); 734 DEV_INIT_TEST_WITH_RETURN(dev_priv);
737 735
738 if (dev_priv->cce_running) { 736 if (dev_priv->cce_running)
739 r128_do_cce_flush(dev_priv); 737 r128_do_cce_flush(dev_priv);
740 }
741 738
742 return r128_do_cce_idle(dev_priv); 739 return r128_do_cce_idle(dev_priv);
743} 740}
@@ -765,7 +762,7 @@ int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_pr
765#define R128_BUFFER_FREE 0 762#define R128_BUFFER_FREE 0
766 763
767#if 0 764#if 0
768static int r128_freelist_init(struct drm_device * dev) 765static int r128_freelist_init(struct drm_device *dev)
769{ 766{
770 struct drm_device_dma *dma = dev->dma; 767 struct drm_device_dma *dma = dev->dma;
771 drm_r128_private_t *dev_priv = dev->dev_private; 768 drm_r128_private_t *dev_priv = dev->dev_private;
@@ -848,7 +845,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
848 return NULL; 845 return NULL;
849} 846}
850 847
851void r128_freelist_reset(struct drm_device * dev) 848void r128_freelist_reset(struct drm_device *dev)
852{ 849{
853 struct drm_device_dma *dma = dev->dma; 850 struct drm_device_dma *dma = dev->dma;
854 int i; 851 int i;
@@ -864,7 +861,7 @@ void r128_freelist_reset(struct drm_device * dev)
864 * CCE command submission 861 * CCE command submission
865 */ 862 */
866 863
867int r128_wait_ring(drm_r128_private_t * dev_priv, int n) 864int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
868{ 865{
869 drm_r128_ring_buffer_t *ring = &dev_priv->ring; 866 drm_r128_ring_buffer_t *ring = &dev_priv->ring;
870 int i; 867 int i;
@@ -881,9 +878,9 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
881 return -EBUSY; 878 return -EBUSY;
882} 879}
883 880
884static int r128_cce_get_buffers(struct drm_device * dev, 881static int r128_cce_get_buffers(struct drm_device *dev,
885 struct drm_file *file_priv, 882 struct drm_file *file_priv,
886 struct drm_dma * d) 883 struct drm_dma *d)
887{ 884{
888 int i; 885 int i;
889 struct drm_buf *buf; 886 struct drm_buf *buf;
@@ -933,9 +930,8 @@ int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_p
933 930
934 d->granted_count = 0; 931 d->granted_count = 0;
935 932
936 if (d->request_count) { 933 if (d->request_count)
937 ret = r128_cce_get_buffers(dev, file_priv, d); 934 ret = r128_cce_get_buffers(dev, file_priv, d);
938 }
939 935
940 return ret; 936 return ret;
941} 937}
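Several helpers in this file (the pixcache flush, the FIFO wait, the CCE idle check) share one shape: poll a status register up to dev_priv->usec_timeout times with a 1 µs delay, return 0 as soon as the busy condition clears, and otherwise return -EBUSY so the ioctl wrapper can reset or retry. A self-contained model of that loop, with a callback standing in for an R128_READ of the real status register:

#include <errno.h>
#include <stdio.h>

/* Poll 'is_busy' up to 'usec_timeout' times, as the r128 helpers do. */
static int wait_until_idle(int (*is_busy)(void *), void *ctx, int usec_timeout)
{
        int i;

        for (i = 0; i < usec_timeout; i++) {
                if (!is_busy(ctx))
                        return 0;       /* engine went idle */
                /* DRM_UDELAY(1) in the driver; omitted in this model */
        }
        return -EBUSY;                  /* caller may reset and retry */
}

static int fake_busy(void *ctx)
{
        int *remaining = ctx;
        return (*remaining)-- > 0;      /* busy for a few more polls */
}

int main(void)
{
        int busy_polls = 5;

        printf("%d\n", wait_until_idle(fake_busy, &busy_polls, 100));  /* 0            */
        busy_polls = 1000;
        printf("%d\n", wait_until_idle(fake_busy, &busy_polls, 100));  /* -EBUSY (-16) */
        return 0;
}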
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index b806fdcc7170..1e2971f13aa1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -85,7 +85,7 @@ static struct drm_driver driver = {
85 .patchlevel = DRIVER_PATCHLEVEL, 85 .patchlevel = DRIVER_PATCHLEVEL,
86}; 86};
87 87
88int r128_driver_load(struct drm_device * dev, unsigned long flags) 88int r128_driver_load(struct drm_device *dev, unsigned long flags)
89{ 89{
90 return drm_vblank_init(dev, 1); 90 return drm_vblank_init(dev, 1);
91} 91}
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 3c60829d82e9..930c71b2fb5e 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -53,7 +53,7 @@
53#define DRIVER_MINOR 5 53#define DRIVER_MINOR 5
54#define DRIVER_PATCHLEVEL 0 54#define DRIVER_PATCHLEVEL 0
55 55
56#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR ) 56#define GET_RING_HEAD(dev_priv) R128_READ(R128_PM4_BUFFER_DL_RPTR)
57 57
58typedef struct drm_r128_freelist { 58typedef struct drm_r128_freelist {
59 unsigned int age; 59 unsigned int age;
@@ -144,23 +144,23 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file
144extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); 144extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
145extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); 145extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
146 146
147extern void r128_freelist_reset(struct drm_device * dev); 147extern void r128_freelist_reset(struct drm_device *dev);
148 148
149extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); 149extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
150 150
151extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); 151extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
152extern int r128_do_cleanup_cce(struct drm_device * dev); 152extern int r128_do_cleanup_cce(struct drm_device *dev);
153 153
154extern int r128_enable_vblank(struct drm_device *dev, int crtc); 154extern int r128_enable_vblank(struct drm_device *dev, int crtc);
155extern void r128_disable_vblank(struct drm_device *dev, int crtc); 155extern void r128_disable_vblank(struct drm_device *dev, int crtc);
156extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); 156extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
157extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); 157extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
158extern void r128_driver_irq_preinstall(struct drm_device * dev); 158extern void r128_driver_irq_preinstall(struct drm_device *dev);
159extern int r128_driver_irq_postinstall(struct drm_device *dev); 159extern int r128_driver_irq_postinstall(struct drm_device *dev);
160extern void r128_driver_irq_uninstall(struct drm_device * dev); 160extern void r128_driver_irq_uninstall(struct drm_device *dev);
161extern void r128_driver_lastclose(struct drm_device * dev); 161extern void r128_driver_lastclose(struct drm_device *dev);
162extern int r128_driver_load(struct drm_device * dev, unsigned long flags); 162extern int r128_driver_load(struct drm_device *dev, unsigned long flags);
163extern void r128_driver_preclose(struct drm_device * dev, 163extern void r128_driver_preclose(struct drm_device *dev,
164 struct drm_file *file_priv); 164 struct drm_file *file_priv);
165 165
166extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, 166extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -390,27 +390,27 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
390 390
391#define R128_PCIGART_TABLE_SIZE 32768 391#define R128_PCIGART_TABLE_SIZE 32768
392 392
393#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) 393#define R128_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
394#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) 394#define R128_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
395#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) 395#define R128_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
396#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) 396#define R128_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
397 397
398#define R128_WRITE_PLL(addr,val) \ 398#define R128_WRITE_PLL(addr, val) \
399do { \ 399do { \
400 R128_WRITE8(R128_CLOCK_CNTL_INDEX, \ 400 R128_WRITE8(R128_CLOCK_CNTL_INDEX, \
401 ((addr) & 0x1f) | R128_PLL_WR_EN); \ 401 ((addr) & 0x1f) | R128_PLL_WR_EN); \
402 R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \ 402 R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
403} while (0) 403} while (0)
404 404
405#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \ 405#define CCE_PACKET0(reg, n) (R128_CCE_PACKET0 | \
406 ((n) << 16) | ((reg) >> 2)) 406 ((n) << 16) | ((reg) >> 2))
407#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \ 407#define CCE_PACKET1(reg0, reg1) (R128_CCE_PACKET1 | \
408 (((reg1) >> 2) << 11) | ((reg0) >> 2)) 408 (((reg1) >> 2) << 11) | ((reg0) >> 2))
409#define CCE_PACKET2() (R128_CCE_PACKET2) 409#define CCE_PACKET2() (R128_CCE_PACKET2)
410#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \ 410#define CCE_PACKET3(pkt, n) (R128_CCE_PACKET3 | \
411 (pkt) | ((n) << 16)) 411 (pkt) | ((n) << 16))
412 412
413static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv) 413static __inline__ void r128_update_ring_snapshot(drm_r128_private_t *dev_priv)
414{ 414{
415 drm_r128_ring_buffer_t *ring = &dev_priv->ring; 415 drm_r128_ring_buffer_t *ring = &dev_priv->ring;
416 ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); 416 ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
@@ -430,37 +430,38 @@ do { \
430 } \ 430 } \
431} while (0) 431} while (0)
432 432
433#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ 433#define RING_SPACE_TEST_WITH_RETURN(dev_priv) \
434do { \ 434do { \
435 drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ 435 drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
436 if ( ring->space < ring->high_mark ) { \ 436 if (ring->space < ring->high_mark) { \
437 for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ 437 for (i = 0 ; i < dev_priv->usec_timeout ; i++) { \
438 r128_update_ring_snapshot( dev_priv ); \ 438 r128_update_ring_snapshot(dev_priv); \
439 if ( ring->space >= ring->high_mark ) \ 439 if (ring->space >= ring->high_mark) \
440 goto __ring_space_done; \ 440 goto __ring_space_done; \
441 DRM_UDELAY(1); \ 441 DRM_UDELAY(1); \
442 } \ 442 } \
443 DRM_ERROR( "ring space check failed!\n" ); \ 443 DRM_ERROR("ring space check failed!\n"); \
444 return -EBUSY; \ 444 return -EBUSY; \
445 } \ 445 } \
446 __ring_space_done: \ 446 __ring_space_done: \
447 ; \ 447 ; \
448} while (0) 448} while (0)
449 449
450#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ 450#define VB_AGE_TEST_WITH_RETURN(dev_priv) \
451do { \ 451do { \
452 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \ 452 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \
453 if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \ 453 if (sarea_priv->last_dispatch >= R128_MAX_VB_AGE) { \
454 int __ret = r128_do_cce_idle( dev_priv ); \ 454 int __ret = r128_do_cce_idle(dev_priv); \
455 if ( __ret ) return __ret; \ 455 if (__ret) \
456 return __ret; \
456 sarea_priv->last_dispatch = 0; \ 457 sarea_priv->last_dispatch = 0; \
457 r128_freelist_reset( dev ); \ 458 r128_freelist_reset(dev); \
458 } \ 459 } \
459} while (0) 460} while (0)
460 461
461#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \ 462#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \
462 OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \ 463 OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0)); \
463 OUT_RING( R128_EVENT_CRTC_OFFSET ); \ 464 OUT_RING(R128_EVENT_CRTC_OFFSET); \
464} while (0) 465} while (0)
465 466
466/* ================================================================ 467/* ================================================================
@@ -472,13 +473,12 @@ do { \
472#define RING_LOCALS \ 473#define RING_LOCALS \
473 int write, _nr; unsigned int tail_mask; volatile u32 *ring; 474 int write, _nr; unsigned int tail_mask; volatile u32 *ring;
474 475
475#define BEGIN_RING( n ) do { \ 476#define BEGIN_RING(n) do { \
476 if ( R128_VERBOSE ) { \ 477 if (R128_VERBOSE) \
477 DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ 478 DRM_INFO("BEGIN_RING(%d)\n", (n)); \
478 } \ 479 if (dev_priv->ring.space <= (n) * sizeof(u32)) { \
479 if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
480 COMMIT_RING(); \ 480 COMMIT_RING(); \
481 r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ 481 r128_wait_ring(dev_priv, (n) * sizeof(u32)); \
482 } \ 482 } \
483 _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ 483 _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
484 ring = dev_priv->ring.start; \ 484 ring = dev_priv->ring.start; \
@@ -494,40 +494,36 @@ do { \
494#define R128_BROKEN_CCE 1 494#define R128_BROKEN_CCE 1
495 495
496#define ADVANCE_RING() do { \ 496#define ADVANCE_RING() do { \
497 if ( R128_VERBOSE ) { \ 497 if (R128_VERBOSE) \
498 DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ 498 DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
499 write, dev_priv->ring.tail ); \ 499 write, dev_priv->ring.tail); \
500 } \ 500 if (R128_BROKEN_CCE && write < 32) \
501 if ( R128_BROKEN_CCE && write < 32 ) { \ 501 memcpy(dev_priv->ring.end, \
502 memcpy( dev_priv->ring.end, \ 502 dev_priv->ring.start, \
503 dev_priv->ring.start, \ 503 write * sizeof(u32)); \
504 write * sizeof(u32) ); \ 504 if (((dev_priv->ring.tail + _nr) & tail_mask) != write) \
505 } \
506 if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
507 DRM_ERROR( \ 505 DRM_ERROR( \
508 "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ 506 "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
509 ((dev_priv->ring.tail + _nr) & tail_mask), \ 507 ((dev_priv->ring.tail + _nr) & tail_mask), \
510 write, __LINE__); \ 508 write, __LINE__); \
511 } else \ 509 else \
512 dev_priv->ring.tail = write; \ 510 dev_priv->ring.tail = write; \
513} while (0) 511} while (0)
514 512
515#define COMMIT_RING() do { \ 513#define COMMIT_RING() do { \
516 if ( R128_VERBOSE ) { \ 514 if (R128_VERBOSE) \
517 DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \ 515 DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
518 dev_priv->ring.tail ); \ 516 dev_priv->ring.tail); \
519 } \
520 DRM_MEMORYBARRIER(); \ 517 DRM_MEMORYBARRIER(); \
521 R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \ 518 R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
522 R128_READ( R128_PM4_BUFFER_DL_WPTR ); \ 519 R128_READ(R128_PM4_BUFFER_DL_WPTR); \
523} while (0) 520} while (0)
524 521
525#define OUT_RING( x ) do { \ 522#define OUT_RING(x) do { \
526 if ( R128_VERBOSE ) { \ 523 if (R128_VERBOSE) \
527 DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ 524 DRM_INFO(" OUT_RING( 0x%08x ) at 0x%x\n", \
528 (unsigned int)(x), write ); \ 525 (unsigned int)(x), write); \
529 } \ 526 ring[write++] = cpu_to_le32(x); \
530 ring[write++] = cpu_to_le32( x ); \
531 write &= tail_mask; \ 527 write &= tail_mask; \
532} while (0) 528} while (0)
533 529
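To make the ring macros above easier to follow as a sequence, here is a small user-space model of the same split: OUT_RING() stores a dword at the write cursor and wraps it with the tail mask, ADVANCE_RING() publishes the cursor as the ring tail, and COMMIT_RING() is where the hardware write pointer would be poked. The ring size and the fake hardware register are invented for the example; only the cursor/tail/mask flow comes from the macros.

#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 16                  /* power of two, like the real ring */

struct ring {
        uint32_t buf[RING_DWORDS];
        unsigned int tail;              /* last committed write position */
        uint32_t hw_wptr;               /* stand-in for R128_PM4_BUFFER_DL_WPTR */
};

static void emit_packet(struct ring *r, const uint32_t *dwords, int n)
{
        unsigned int write = r->tail;   /* BEGIN_RING(): grab the cursor */
        unsigned int mask = RING_DWORDS - 1;
        int i;

        for (i = 0; i < n; i++) {       /* OUT_RING() for each dword */
                r->buf[write++] = dwords[i];
                write &= mask;
        }
        r->tail = write;                /* ADVANCE_RING(): publish the tail */
        r->hw_wptr = r->tail;           /* COMMIT_RING(): kick the "hardware" */
}

int main(void)
{
        struct ring r = { .tail = 0 };
        uint32_t pkt[3] = { 0xC0001000, 0x12345678, 0x9abcdef0 };

        emit_packet(&r, pkt, 3);
        printf("tail=%u hw_wptr=%u first=0x%08x\n", r.tail, r.hw_wptr, r.buf[0]);
        return 0;
}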
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 69810fb8ac49..429d5a02695f 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -90,7 +90,7 @@ void r128_disable_vblank(struct drm_device *dev, int crtc)
90 */ 90 */
91} 91}
92 92
93void r128_driver_irq_preinstall(struct drm_device * dev) 93void r128_driver_irq_preinstall(struct drm_device *dev)
94{ 94{
95 drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; 95 drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
96 96
@@ -105,7 +105,7 @@ int r128_driver_irq_postinstall(struct drm_device *dev)
105 return 0; 105 return 0;
106} 106}
107 107
108void r128_driver_irq_uninstall(struct drm_device * dev) 108void r128_driver_irq_uninstall(struct drm_device *dev)
109{ 109{
110 drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; 110 drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
111 if (!dev_priv) 111 if (!dev_priv)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index af2665cf4718..077af1f2f9b4 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -37,8 +37,8 @@
37 * CCE hardware state programming functions 37 * CCE hardware state programming functions
38 */ 38 */
39 39
40static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, 40static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
41 struct drm_clip_rect * boxes, int count) 41 struct drm_clip_rect *boxes, int count)
42{ 42{
43 u32 aux_sc_cntl = 0x00000000; 43 u32 aux_sc_cntl = 0x00000000;
44 RING_LOCALS; 44 RING_LOCALS;
@@ -80,7 +80,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
80 ADVANCE_RING(); 80 ADVANCE_RING();
81} 81}
82 82
83static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv) 83static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
84{ 84{
85 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 85 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
86 drm_r128_context_regs_t *ctx = &sarea_priv->context_state; 86 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -95,7 +95,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
95 ADVANCE_RING(); 95 ADVANCE_RING();
96} 96}
97 97
98static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv) 98static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
99{ 99{
100 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 100 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
101 drm_r128_context_regs_t *ctx = &sarea_priv->context_state; 101 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -121,7 +121,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
121 ADVANCE_RING(); 121 ADVANCE_RING();
122} 122}
123 123
124static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv) 124static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
125{ 125{
126 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 126 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
127 drm_r128_context_regs_t *ctx = &sarea_priv->context_state; 127 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -137,7 +137,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
137 ADVANCE_RING(); 137 ADVANCE_RING();
138} 138}
139 139
140static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv) 140static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
141{ 141{
142 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 142 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
143 drm_r128_context_regs_t *ctx = &sarea_priv->context_state; 143 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -156,7 +156,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
156 ADVANCE_RING(); 156 ADVANCE_RING();
157} 157}
158 158
159static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv) 159static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
160{ 160{
161 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 161 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
162 drm_r128_context_regs_t *ctx = &sarea_priv->context_state; 162 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -171,7 +171,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
171 ADVANCE_RING(); 171 ADVANCE_RING();
172} 172}
173 173
174static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv) 174static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
175{ 175{
176 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 176 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
177 drm_r128_context_regs_t *ctx = &sarea_priv->context_state; 177 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -187,9 +187,8 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
187 OUT_RING(tex->tex_cntl); 187 OUT_RING(tex->tex_cntl);
188 OUT_RING(tex->tex_combine_cntl); 188 OUT_RING(tex->tex_combine_cntl);
189 OUT_RING(ctx->tex_size_pitch_c); 189 OUT_RING(ctx->tex_size_pitch_c);
190 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { 190 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
191 OUT_RING(tex->tex_offset[i]); 191 OUT_RING(tex->tex_offset[i]);
192 }
193 192
194 OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1)); 193 OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
195 OUT_RING(ctx->constant_color_c); 194 OUT_RING(ctx->constant_color_c);
@@ -198,7 +197,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
198 ADVANCE_RING(); 197 ADVANCE_RING();
199} 198}
200 199
201static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) 200static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
202{ 201{
203 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 202 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
204 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1]; 203 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -211,9 +210,8 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
211 OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS)); 210 OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
212 OUT_RING(tex->tex_cntl); 211 OUT_RING(tex->tex_cntl);
213 OUT_RING(tex->tex_combine_cntl); 212 OUT_RING(tex->tex_combine_cntl);
214 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { 213 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
215 OUT_RING(tex->tex_offset[i]); 214 OUT_RING(tex->tex_offset[i]);
216 }
217 215
218 OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0)); 216 OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
219 OUT_RING(tex->tex_border_color); 217 OUT_RING(tex->tex_border_color);
@@ -221,7 +219,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
221 ADVANCE_RING(); 219 ADVANCE_RING();
222} 220}
223 221
224static void r128_emit_state(drm_r128_private_t * dev_priv) 222static void r128_emit_state(drm_r128_private_t *dev_priv)
225{ 223{
226 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 224 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
227 unsigned int dirty = sarea_priv->dirty; 225 unsigned int dirty = sarea_priv->dirty;
@@ -274,7 +272,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)
274 * Performance monitoring functions 272 * Performance monitoring functions
275 */ 273 */
276 274
277static void r128_clear_box(drm_r128_private_t * dev_priv, 275static void r128_clear_box(drm_r128_private_t *dev_priv,
278 int x, int y, int w, int h, int r, int g, int b) 276 int x, int y, int w, int h, int r, int g, int b)
279{ 277{
280 u32 pitch, offset; 278 u32 pitch, offset;
@@ -321,13 +319,12 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
321 ADVANCE_RING(); 319 ADVANCE_RING();
322} 320}
323 321
324static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv) 322static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
325{ 323{
326 if (atomic_read(&dev_priv->idle_count) == 0) { 324 if (atomic_read(&dev_priv->idle_count) == 0)
327 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); 325 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
328 } else { 326 else
329 atomic_set(&dev_priv->idle_count, 0); 327 atomic_set(&dev_priv->idle_count, 0);
330 }
331} 328}
332 329
333#endif 330#endif
@@ -352,8 +349,8 @@ static void r128_print_dirty(const char *msg, unsigned int flags)
352 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : ""); 349 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
353} 350}
354 351
355static void r128_cce_dispatch_clear(struct drm_device * dev, 352static void r128_cce_dispatch_clear(struct drm_device *dev,
356 drm_r128_clear_t * clear) 353 drm_r128_clear_t *clear)
357{ 354{
358 drm_r128_private_t *dev_priv = dev->dev_private; 355 drm_r128_private_t *dev_priv = dev->dev_private;
359 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 356 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -458,7 +455,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,
458 } 455 }
459} 456}
460 457
461static void r128_cce_dispatch_swap(struct drm_device * dev) 458static void r128_cce_dispatch_swap(struct drm_device *dev)
462{ 459{
463 drm_r128_private_t *dev_priv = dev->dev_private; 460 drm_r128_private_t *dev_priv = dev->dev_private;
464 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 461 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -524,7 +521,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)
524 ADVANCE_RING(); 521 ADVANCE_RING();
525} 522}
526 523
527static void r128_cce_dispatch_flip(struct drm_device * dev) 524static void r128_cce_dispatch_flip(struct drm_device *dev)
528{ 525{
529 drm_r128_private_t *dev_priv = dev->dev_private; 526 drm_r128_private_t *dev_priv = dev->dev_private;
530 RING_LOCALS; 527 RING_LOCALS;
@@ -542,11 +539,10 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
542 R128_WAIT_UNTIL_PAGE_FLIPPED(); 539 R128_WAIT_UNTIL_PAGE_FLIPPED();
543 OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0)); 540 OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
544 541
545 if (dev_priv->current_page == 0) { 542 if (dev_priv->current_page == 0)
546 OUT_RING(dev_priv->back_offset); 543 OUT_RING(dev_priv->back_offset);
547 } else { 544 else
548 OUT_RING(dev_priv->front_offset); 545 OUT_RING(dev_priv->front_offset);
549 }
550 546
551 ADVANCE_RING(); 547 ADVANCE_RING();
552 548
@@ -566,7 +562,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
566 ADVANCE_RING(); 562 ADVANCE_RING();
567} 563}
568 564
569static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) 565static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
570{ 566{
571 drm_r128_private_t *dev_priv = dev->dev_private; 567 drm_r128_private_t *dev_priv = dev->dev_private;
572 drm_r128_buf_priv_t *buf_priv = buf->dev_private; 568 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -585,9 +581,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b
585 if (buf->used) { 581 if (buf->used) {
586 buf_priv->dispatched = 1; 582 buf_priv->dispatched = 1;
587 583
588 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { 584 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
589 r128_emit_state(dev_priv); 585 r128_emit_state(dev_priv);
590 }
591 586
592 do { 587 do {
593 /* Emit the next set of up to three cliprects */ 588 /* Emit the next set of up to three cliprects */
@@ -636,8 +631,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b
636 sarea_priv->nbox = 0; 631 sarea_priv->nbox = 0;
637} 632}
638 633
639static void r128_cce_dispatch_indirect(struct drm_device * dev, 634static void r128_cce_dispatch_indirect(struct drm_device *dev,
640 struct drm_buf * buf, int start, int end) 635 struct drm_buf *buf, int start, int end)
641{ 636{
642 drm_r128_private_t *dev_priv = dev->dev_private; 637 drm_r128_private_t *dev_priv = dev->dev_private;
643 drm_r128_buf_priv_t *buf_priv = buf->dev_private; 638 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -691,8 +686,8 @@ static void r128_cce_dispatch_indirect(struct drm_device * dev,
691 dev_priv->sarea_priv->last_dispatch++; 686 dev_priv->sarea_priv->last_dispatch++;
692} 687}
693 688
694static void r128_cce_dispatch_indices(struct drm_device * dev, 689static void r128_cce_dispatch_indices(struct drm_device *dev,
695 struct drm_buf * buf, 690 struct drm_buf *buf,
696 int start, int end, int count) 691 int start, int end, int count)
697{ 692{
698 drm_r128_private_t *dev_priv = dev->dev_private; 693 drm_r128_private_t *dev_priv = dev->dev_private;
@@ -713,9 +708,8 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
713 if (start != end) { 708 if (start != end) {
714 buf_priv->dispatched = 1; 709 buf_priv->dispatched = 1;
715 710
716 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { 711 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
717 r128_emit_state(dev_priv); 712 r128_emit_state(dev_priv);
718 }
719 713
720 dwords = (end - start + 3) / sizeof(u32); 714 dwords = (end - start + 3) / sizeof(u32);
721 715
@@ -775,9 +769,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
775 sarea_priv->nbox = 0; 769 sarea_priv->nbox = 0;
776} 770}
777 771
778static int r128_cce_dispatch_blit(struct drm_device * dev, 772static int r128_cce_dispatch_blit(struct drm_device *dev,
779 struct drm_file *file_priv, 773 struct drm_file *file_priv,
780 drm_r128_blit_t * blit) 774 drm_r128_blit_t *blit)
781{ 775{
782 drm_r128_private_t *dev_priv = dev->dev_private; 776 drm_r128_private_t *dev_priv = dev->dev_private;
783 struct drm_device_dma *dma = dev->dma; 777 struct drm_device_dma *dma = dev->dma;
@@ -887,8 +881,8 @@ static int r128_cce_dispatch_blit(struct drm_device * dev,
887 * have hardware stencil support. 881 * have hardware stencil support.
888 */ 882 */
889 883
890static int r128_cce_dispatch_write_span(struct drm_device * dev, 884static int r128_cce_dispatch_write_span(struct drm_device *dev,
891 drm_r128_depth_t * depth) 885 drm_r128_depth_t *depth)
892{ 886{
893 drm_r128_private_t *dev_priv = dev->dev_private; 887 drm_r128_private_t *dev_priv = dev->dev_private;
894 int count, x, y; 888 int count, x, y;
@@ -902,12 +896,10 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
902 if (count > 4096 || count <= 0) 896 if (count > 4096 || count <= 0)
903 return -EMSGSIZE; 897 return -EMSGSIZE;
904 898
905 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 899 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
906 return -EFAULT; 900 return -EFAULT;
907 } 901 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
908 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
909 return -EFAULT; 902 return -EFAULT;
910 }
911 903
912 buffer_size = depth->n * sizeof(u32); 904 buffer_size = depth->n * sizeof(u32);
913 buffer = kmalloc(buffer_size, GFP_KERNEL); 905 buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -983,8 +975,8 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
983 return 0; 975 return 0;
984} 976}
985 977
986static int r128_cce_dispatch_write_pixels(struct drm_device * dev, 978static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
987 drm_r128_depth_t * depth) 979 drm_r128_depth_t *depth)
988{ 980{
989 drm_r128_private_t *dev_priv = dev->dev_private; 981 drm_r128_private_t *dev_priv = dev->dev_private;
990 int count, *x, *y; 982 int count, *x, *y;
@@ -1001,9 +993,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1001 xbuf_size = count * sizeof(*x); 993 xbuf_size = count * sizeof(*x);
1002 ybuf_size = count * sizeof(*y); 994 ybuf_size = count * sizeof(*y);
1003 x = kmalloc(xbuf_size, GFP_KERNEL); 995 x = kmalloc(xbuf_size, GFP_KERNEL);
1004 if (x == NULL) { 996 if (x == NULL)
1005 return -ENOMEM; 997 return -ENOMEM;
1006 }
1007 y = kmalloc(ybuf_size, GFP_KERNEL); 998 y = kmalloc(ybuf_size, GFP_KERNEL);
1008 if (y == NULL) { 999 if (y == NULL) {
1009 kfree(x); 1000 kfree(x);
@@ -1105,8 +1096,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1105 return 0; 1096 return 0;
1106} 1097}
1107 1098
1108static int r128_cce_dispatch_read_span(struct drm_device * dev, 1099static int r128_cce_dispatch_read_span(struct drm_device *dev,
1109 drm_r128_depth_t * depth) 1100 drm_r128_depth_t *depth)
1110{ 1101{
1111 drm_r128_private_t *dev_priv = dev->dev_private; 1102 drm_r128_private_t *dev_priv = dev->dev_private;
1112 int count, x, y; 1103 int count, x, y;
@@ -1117,12 +1108,10 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
1117 if (count > 4096 || count <= 0) 1108 if (count > 4096 || count <= 0)
1118 return -EMSGSIZE; 1109 return -EMSGSIZE;
1119 1110
1120 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 1111 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
1121 return -EFAULT; 1112 return -EFAULT;
1122 } 1113 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
1123 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
1124 return -EFAULT; 1114 return -EFAULT;
1125 }
1126 1115
1127 BEGIN_RING(7); 1116 BEGIN_RING(7);
1128 1117
@@ -1148,8 +1137,8 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
1148 return 0; 1137 return 0;
1149} 1138}
1150 1139
1151static int r128_cce_dispatch_read_pixels(struct drm_device * dev, 1140static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
1152 drm_r128_depth_t * depth) 1141 drm_r128_depth_t *depth)
1153{ 1142{
1154 drm_r128_private_t *dev_priv = dev->dev_private; 1143 drm_r128_private_t *dev_priv = dev->dev_private;
1155 int count, *x, *y; 1144 int count, *x, *y;
@@ -1161,16 +1150,14 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1161 if (count > 4096 || count <= 0) 1150 if (count > 4096 || count <= 0)
1162 return -EMSGSIZE; 1151 return -EMSGSIZE;
1163 1152
1164 if (count > dev_priv->depth_pitch) { 1153 if (count > dev_priv->depth_pitch)
1165 count = dev_priv->depth_pitch; 1154 count = dev_priv->depth_pitch;
1166 }
1167 1155
1168 xbuf_size = count * sizeof(*x); 1156 xbuf_size = count * sizeof(*x);
1169 ybuf_size = count * sizeof(*y); 1157 ybuf_size = count * sizeof(*y);
1170 x = kmalloc(xbuf_size, GFP_KERNEL); 1158 x = kmalloc(xbuf_size, GFP_KERNEL);
1171 if (x == NULL) { 1159 if (x == NULL)
1172 return -ENOMEM; 1160 return -ENOMEM;
1173 }
1174 y = kmalloc(ybuf_size, GFP_KERNEL); 1161 y = kmalloc(ybuf_size, GFP_KERNEL);
1175 if (y == NULL) { 1162 if (y == NULL) {
1176 kfree(x); 1163 kfree(x);
@@ -1220,7 +1207,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1220 * Polygon stipple 1207 * Polygon stipple
1221 */ 1208 */
1222 1209
1223static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) 1210static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
1224{ 1211{
1225 drm_r128_private_t *dev_priv = dev->dev_private; 1212 drm_r128_private_t *dev_priv = dev->dev_private;
1226 int i; 1213 int i;
@@ -1230,9 +1217,8 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
1230 BEGIN_RING(33); 1217 BEGIN_RING(33);
1231 1218
1232 OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31)); 1219 OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
1233 for (i = 0; i < 32; i++) { 1220 for (i = 0; i < 32; i++)
1234 OUT_RING(stipple[i]); 1221 OUT_RING(stipple[i]);
1235 }
1236 1222
1237 ADVANCE_RING(); 1223 ADVANCE_RING();
1238} 1224}
@@ -1269,7 +1255,7 @@ static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *f
1269 return 0; 1255 return 0;
1270} 1256}
1271 1257
1272static int r128_do_init_pageflip(struct drm_device * dev) 1258static int r128_do_init_pageflip(struct drm_device *dev)
1273{ 1259{
1274 drm_r128_private_t *dev_priv = dev->dev_private; 1260 drm_r128_private_t *dev_priv = dev->dev_private;
1275 DRM_DEBUG("\n"); 1261 DRM_DEBUG("\n");
@@ -1288,7 +1274,7 @@ static int r128_do_init_pageflip(struct drm_device * dev)
1288 return 0; 1274 return 0;
1289} 1275}
1290 1276
1291static int r128_do_cleanup_pageflip(struct drm_device * dev) 1277static int r128_do_cleanup_pageflip(struct drm_device *dev)
1292{ 1278{
1293 drm_r128_private_t *dev_priv = dev->dev_private; 1279 drm_r128_private_t *dev_priv = dev->dev_private;
1294 DRM_DEBUG("\n"); 1280 DRM_DEBUG("\n");
@@ -1645,17 +1631,16 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
1645 return 0; 1631 return 0;
1646} 1632}
1647 1633
1648void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1634void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1649{ 1635{
1650 if (dev->dev_private) { 1636 if (dev->dev_private) {
1651 drm_r128_private_t *dev_priv = dev->dev_private; 1637 drm_r128_private_t *dev_priv = dev->dev_private;
1652 if (dev_priv->page_flipping) { 1638 if (dev_priv->page_flipping)
1653 r128_do_cleanup_pageflip(dev); 1639 r128_do_cleanup_pageflip(dev);
1654 }
1655 } 1640 }
1656} 1641}
1657 1642
1658void r128_driver_lastclose(struct drm_device * dev) 1643void r128_driver_lastclose(struct drm_device *dev)
1659{ 1644{
1660 r128_do_cleanup_cce(dev); 1645 r128_do_cleanup_cce(dev);
1661} 1646}
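One last pattern from the depth-buffer paths above: two temporary coordinate arrays are allocated back to back, and if the second allocation fails the first has to be freed before returning -ENOMEM. A user-space rendering of that cleanup rule (malloc/free standing in for kmalloc/kfree; the names x and y are kept from the diff):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate paired coordinate buffers; on partial failure free what we got. */
static int alloc_xy(int **x, int **y, size_t count)
{
        *x = malloc(count * sizeof(**x));
        if (*x == NULL)
                return -ENOMEM;

        *y = malloc(count * sizeof(**y));
        if (*y == NULL) {
                free(*x);               /* don't leak the first buffer */
                *x = NULL;
                return -ENOMEM;
        }
        return 0;
}

int main(void)
{
        int *x, *y;

        if (alloc_xy(&x, &y, 4096) == 0) {
                printf("both buffers allocated\n");
                free(x);
                free(y);
        }
        return 0;
}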
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 84b1f2729d43..aebe00875041 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -69,5 +69,6 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
69 69
70radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 70radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
71radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 71radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
72radeon-$(CONFIG_ACPI) += radeon_acpi.o
72 73
73obj-$(CONFIG_DRM_RADEON)+= radeon.o 74obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 1d569830ed99..8e421f644a54 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -108,12 +108,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
108 base++; 108 base++;
109 break; 109 break;
110 case ATOM_IIO_READ: 110 case ATOM_IIO_READ:
111 temp = ctx->card->reg_read(ctx->card, CU16(base + 1)); 111 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
112 base += 3; 112 base += 3;
113 break; 113 break;
114 case ATOM_IIO_WRITE: 114 case ATOM_IIO_WRITE:
115 (void)ctx->card->reg_read(ctx->card, CU16(base + 1)); 115 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
116 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
117 base += 3; 116 base += 3;
118 break; 117 break;
119 case ATOM_IIO_CLEAR: 118 case ATOM_IIO_CLEAR:
@@ -715,8 +714,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
715 cjiffies = jiffies; 714 cjiffies = jiffies;
716 if (time_after(cjiffies, ctx->last_jump_jiffies)) { 715 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
717 cjiffies -= ctx->last_jump_jiffies; 716 cjiffies -= ctx->last_jump_jiffies;
718 if ((jiffies_to_msecs(cjiffies) > 1000)) { 717 if ((jiffies_to_msecs(cjiffies) > 5000)) {
719 DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n"); 718 DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
720 ctx->abort = true; 719 ctx->abort = true;
721 } 720 }
722 } else { 721 } else {
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index cd1b64ab5ca7..a589a55b223e 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -113,6 +113,8 @@ struct card_info {
113 struct drm_device *dev; 113 struct drm_device *dev;
114 void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ 114 void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
115 uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */ 115 uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
116 void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
117 uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
116 void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ 118 void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
117 uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */ 119 uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
118 void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ 120 void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
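The new ioreg_read/ioreg_write hooks mirror the existing reg_read/reg_write pair and are what the ATOM_IIO_READ/ATOM_IIO_WRITE cases in atom.c above now call. A minimal sketch of how a driver could fill them in follows; the example_* helper names and the rdev->rio_mem I/O-BAR mapping are assumptions for illustration only, not the actual radeon wiring.

	/* Illustrative only; example_* names and rdev->rio_mem are assumed. */
	static uint32_t example_ioreg_read(struct card_info *info, uint32_t reg)
	{
		struct radeon_device *rdev = info->dev->dev_private;

		return ioread32(rdev->rio_mem + reg);
	}

	static void example_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
	{
		struct radeon_device *rdev = info->dev->dev_private;

		iowrite32(val, rdev->rio_mem + reg);
	}

	/*
	 * ... and wherever the driver builds its struct card_info:
	 *	atom_card_info->ioreg_read  = example_ioreg_read;
	 *	atom_card_info->ioreg_write = example_ioreg_write;
	 */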
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 8c2d6478a221..ec702345d70e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -669,56 +669,25 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
669 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 669 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
670} 670}
671 671
672static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 672static void atombios_crtc_program_pll(struct drm_crtc *crtc,
673 int crtc_id,
674 int pll_id,
675 u32 encoder_mode,
676 u32 encoder_id,
677 u32 clock,
678 u32 ref_div,
679 u32 fb_div,
680 u32 frac_fb_div,
681 u32 post_div)
673{ 682{
674 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
675 struct drm_device *dev = crtc->dev; 683 struct drm_device *dev = crtc->dev;
676 struct radeon_device *rdev = dev->dev_private; 684 struct radeon_device *rdev = dev->dev_private;
677 struct drm_encoder *encoder = NULL;
678 struct radeon_encoder *radeon_encoder = NULL;
679 u8 frev, crev; 685 u8 frev, crev;
680 int index; 686 int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
681 union set_pixel_clock args; 687 union set_pixel_clock args;
682 u32 pll_clock = mode->clock;
683 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
684 struct radeon_pll *pll;
685 u32 adjusted_clock;
686 int encoder_mode = 0;
687 688
688 memset(&args, 0, sizeof(args)); 689 memset(&args, 0, sizeof(args));
689 690
690 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
691 if (encoder->crtc == crtc) {
692 radeon_encoder = to_radeon_encoder(encoder);
693 encoder_mode = atombios_get_encoder_mode(encoder);
694 break;
695 }
696 }
697
698 if (!radeon_encoder)
699 return;
700
701 switch (radeon_crtc->pll_id) {
702 case ATOM_PPLL1:
703 pll = &rdev->clock.p1pll;
704 break;
705 case ATOM_PPLL2:
706 pll = &rdev->clock.p2pll;
707 break;
708 case ATOM_DCPLL:
709 case ATOM_PPLL_INVALID:
710 default:
711 pll = &rdev->clock.dcpll;
712 break;
713 }
714
715 /* adjust pixel clock as needed */
716 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
717
718 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
719 &ref_div, &post_div);
720
721 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
722 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 691 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
723 &crev)) 692 &crev))
724 return; 693 return;
@@ -727,47 +696,49 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
727 case 1: 696 case 1:
728 switch (crev) { 697 switch (crev) {
729 case 1: 698 case 1:
730 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 699 if (clock == ATOM_DISABLE)
700 return;
701 args.v1.usPixelClock = cpu_to_le16(clock / 10);
731 args.v1.usRefDiv = cpu_to_le16(ref_div); 702 args.v1.usRefDiv = cpu_to_le16(ref_div);
732 args.v1.usFbDiv = cpu_to_le16(fb_div); 703 args.v1.usFbDiv = cpu_to_le16(fb_div);
733 args.v1.ucFracFbDiv = frac_fb_div; 704 args.v1.ucFracFbDiv = frac_fb_div;
734 args.v1.ucPostDiv = post_div; 705 args.v1.ucPostDiv = post_div;
735 args.v1.ucPpll = radeon_crtc->pll_id; 706 args.v1.ucPpll = pll_id;
736 args.v1.ucCRTC = radeon_crtc->crtc_id; 707 args.v1.ucCRTC = crtc_id;
737 args.v1.ucRefDivSrc = 1; 708 args.v1.ucRefDivSrc = 1;
738 break; 709 break;
739 case 2: 710 case 2:
740 args.v2.usPixelClock = cpu_to_le16(mode->clock / 10); 711 args.v2.usPixelClock = cpu_to_le16(clock / 10);
741 args.v2.usRefDiv = cpu_to_le16(ref_div); 712 args.v2.usRefDiv = cpu_to_le16(ref_div);
742 args.v2.usFbDiv = cpu_to_le16(fb_div); 713 args.v2.usFbDiv = cpu_to_le16(fb_div);
743 args.v2.ucFracFbDiv = frac_fb_div; 714 args.v2.ucFracFbDiv = frac_fb_div;
744 args.v2.ucPostDiv = post_div; 715 args.v2.ucPostDiv = post_div;
745 args.v2.ucPpll = radeon_crtc->pll_id; 716 args.v2.ucPpll = pll_id;
746 args.v2.ucCRTC = radeon_crtc->crtc_id; 717 args.v2.ucCRTC = crtc_id;
747 args.v2.ucRefDivSrc = 1; 718 args.v2.ucRefDivSrc = 1;
748 break; 719 break;
749 case 3: 720 case 3:
750 args.v3.usPixelClock = cpu_to_le16(mode->clock / 10); 721 args.v3.usPixelClock = cpu_to_le16(clock / 10);
751 args.v3.usRefDiv = cpu_to_le16(ref_div); 722 args.v3.usRefDiv = cpu_to_le16(ref_div);
752 args.v3.usFbDiv = cpu_to_le16(fb_div); 723 args.v3.usFbDiv = cpu_to_le16(fb_div);
753 args.v3.ucFracFbDiv = frac_fb_div; 724 args.v3.ucFracFbDiv = frac_fb_div;
754 args.v3.ucPostDiv = post_div; 725 args.v3.ucPostDiv = post_div;
755 args.v3.ucPpll = radeon_crtc->pll_id; 726 args.v3.ucPpll = pll_id;
756 args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2); 727 args.v3.ucMiscInfo = (pll_id << 2);
757 args.v3.ucTransmitterId = radeon_encoder->encoder_id; 728 args.v3.ucTransmitterId = encoder_id;
758 args.v3.ucEncoderMode = encoder_mode; 729 args.v3.ucEncoderMode = encoder_mode;
759 break; 730 break;
760 case 5: 731 case 5:
761 args.v5.ucCRTC = radeon_crtc->crtc_id; 732 args.v5.ucCRTC = crtc_id;
762 args.v5.usPixelClock = cpu_to_le16(mode->clock / 10); 733 args.v5.usPixelClock = cpu_to_le16(clock / 10);
763 args.v5.ucRefDiv = ref_div; 734 args.v5.ucRefDiv = ref_div;
764 args.v5.usFbDiv = cpu_to_le16(fb_div); 735 args.v5.usFbDiv = cpu_to_le16(fb_div);
765 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 736 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
766 args.v5.ucPostDiv = post_div; 737 args.v5.ucPostDiv = post_div;
767 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ 738 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
768 args.v5.ucTransmitterID = radeon_encoder->encoder_id; 739 args.v5.ucTransmitterID = encoder_id;
769 args.v5.ucEncoderMode = encoder_mode; 740 args.v5.ucEncoderMode = encoder_mode;
770 args.v5.ucPpll = radeon_crtc->pll_id; 741 args.v5.ucPpll = pll_id;
771 break; 742 break;
772 default: 743 default:
773 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 744 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -782,6 +753,56 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
782 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 753 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
783} 754}
784 755
756static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
757{
758 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
759 struct drm_device *dev = crtc->dev;
760 struct radeon_device *rdev = dev->dev_private;
761 struct drm_encoder *encoder = NULL;
762 struct radeon_encoder *radeon_encoder = NULL;
763 u32 pll_clock = mode->clock;
764 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
765 struct radeon_pll *pll;
766 u32 adjusted_clock;
767 int encoder_mode = 0;
768
769 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
770 if (encoder->crtc == crtc) {
771 radeon_encoder = to_radeon_encoder(encoder);
772 encoder_mode = atombios_get_encoder_mode(encoder);
773 break;
774 }
775 }
776
777 if (!radeon_encoder)
778 return;
779
780 switch (radeon_crtc->pll_id) {
781 case ATOM_PPLL1:
782 pll = &rdev->clock.p1pll;
783 break;
784 case ATOM_PPLL2:
785 pll = &rdev->clock.p2pll;
786 break;
787 case ATOM_DCPLL:
788 case ATOM_PPLL_INVALID:
789 default:
790 pll = &rdev->clock.dcpll;
791 break;
792 }
793
794 /* adjust pixel clock as needed */
795 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
796
797 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
798 &ref_div, &post_div);
799
800 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
801 encoder_mode, radeon_encoder->encoder_id, mode->clock,
802 ref_div, fb_div, frac_fb_div, post_div);
803
804}
805
785static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, 806static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
786 struct drm_framebuffer *old_fb) 807 struct drm_framebuffer *old_fb)
787{ 808{
@@ -841,6 +862,11 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
841 return -EINVAL; 862 return -EINVAL;
842 } 863 }
843 864
865 if (tiling_flags & RADEON_TILING_MACRO)
866 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
867 else if (tiling_flags & RADEON_TILING_MICRO)
868 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
869
844 switch (radeon_crtc->crtc_id) { 870 switch (radeon_crtc->crtc_id) {
845 case 0: 871 case 0:
846 WREG32(AVIVO_D1VGA_CONTROL, 0); 872 WREG32(AVIVO_D1VGA_CONTROL, 0);
@@ -979,11 +1005,18 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
979 return -EINVAL; 1005 return -EINVAL;
980 } 1006 }
981 1007
982 if (tiling_flags & RADEON_TILING_MACRO) 1008 if (rdev->family >= CHIP_R600) {
983 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; 1009 if (tiling_flags & RADEON_TILING_MACRO)
1010 fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
1011 else if (tiling_flags & RADEON_TILING_MICRO)
1012 fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
1013 } else {
1014 if (tiling_flags & RADEON_TILING_MACRO)
1015 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
984 1016
985 if (tiling_flags & RADEON_TILING_MICRO) 1017 if (tiling_flags & RADEON_TILING_MICRO)
986 fb_format |= AVIVO_D1GRPH_TILED; 1018 fb_format |= AVIVO_D1GRPH_TILED;
1019 }
987 1020
988 if (radeon_crtc->crtc_id == 0) 1021 if (radeon_crtc->crtc_id == 0)
989 WREG32(AVIVO_D1VGA_CONTROL, 0); 1022 WREG32(AVIVO_D1VGA_CONTROL, 0);
@@ -1191,6 +1224,24 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
1191 atombios_lock_crtc(crtc, ATOM_DISABLE); 1224 atombios_lock_crtc(crtc, ATOM_DISABLE);
1192} 1225}
1193 1226
1227static void atombios_crtc_disable(struct drm_crtc *crtc)
1228{
1229 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1230 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1231
1232 switch (radeon_crtc->pll_id) {
1233 case ATOM_PPLL1:
1234 case ATOM_PPLL2:
1235 /* disable the ppll */
1236 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1237 0, 0, ATOM_DISABLE, 0, 0, 0, 0);
1238 break;
1239 default:
1240 break;
1241 }
1242 radeon_crtc->pll_id = -1;
1243}
1244
1194static const struct drm_crtc_helper_funcs atombios_helper_funcs = { 1245static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
1195 .dpms = atombios_crtc_dpms, 1246 .dpms = atombios_crtc_dpms,
1196 .mode_fixup = atombios_crtc_mode_fixup, 1247 .mode_fixup = atombios_crtc_mode_fixup,
@@ -1199,6 +1250,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
1199 .prepare = atombios_crtc_prepare, 1250 .prepare = atombios_crtc_prepare,
1200 .commit = atombios_crtc_commit, 1251 .commit = atombios_crtc_commit,
1201 .load_lut = radeon_crtc_load_lut, 1252 .load_lut = radeon_crtc_load_lut,
1253 .disable = atombios_crtc_disable,
1202}; 1254};
1203 1255
1204void radeon_atombios_init_crtc(struct drm_device *dev, 1256void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 057192acdd36..957d5067ad9c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,23 @@
39static void evergreen_gpu_init(struct radeon_device *rdev); 39static void evergreen_gpu_init(struct radeon_device *rdev);
40void evergreen_fini(struct radeon_device *rdev); 40void evergreen_fini(struct radeon_device *rdev);
41 41
42/* get temperature in millidegrees */
43u32 evergreen_get_temp(struct radeon_device *rdev)
44{
45 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
46 ASIC_T_SHIFT;
47 u32 actual_temp = 0;
48
49 if ((temp >> 10) & 1)
50 actual_temp = 0;
51 else if ((temp >> 9) & 1)
52 actual_temp = 255;
53 else
54 actual_temp = (temp >> 1) & 0xff;
55
56 return actual_temp * 1000;
57}
58
42void evergreen_pm_misc(struct radeon_device *rdev) 59void evergreen_pm_misc(struct radeon_device *rdev)
43{ 60{
44 int req_ps_idx = rdev->pm.requested_power_state_index; 61 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1115,6 +1132,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1115 rdev->config.evergreen.max_backends) & 1132 rdev->config.evergreen.max_backends) &
1116 EVERGREEN_MAX_BACKENDS_MASK)); 1133 EVERGREEN_MAX_BACKENDS_MASK));
1117 1134
1135 rdev->config.evergreen.tile_config = gb_addr_config;
1118 WREG32(GB_BACKEND_MAP, gb_backend_map); 1136 WREG32(GB_BACKEND_MAP, gb_backend_map);
1119 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1137 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1120 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1138 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
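The evergreen_get_temp() helper added above turns the 11-bit ASIC_T field of CG_MULT_THERMAL_STATUS into millidegrees. As a quick sanity check of that decode, the small user-space-style sketch below (illustrative only, not driver code) mirrors the same logic and shows that a raw field value of 0x62 comes out as 49000 millidegrees, i.e. 49 degrees C.

	/* Mirrors the ASIC_T decode from evergreen_get_temp() above; illustrative only. */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t decode_asic_t(uint32_t temp)	/* 11-bit ASIC_T field */
	{
		uint32_t actual_temp;

		if ((temp >> 10) & 1)		/* status bit set: report 0, as in the driver code above */
			actual_temp = 0;
		else if ((temp >> 9) & 1)	/* status bit set: report 255, as in the driver code above */
			actual_temp = 255;
		else
			actual_temp = (temp >> 1) & 0xff;

		return actual_temp * 1000;	/* millidegrees C */
	}

	int main(void)
	{
		printf("%u\n", decode_asic_t(0x62));	/* prints 49000 (49 C) */
		return 0;
	}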
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 010963d4570f..345a75a03c96 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -333,7 +333,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
333 header = radeon_get_ib_value(p, h_idx); 333 header = radeon_get_ib_value(p, h_idx);
334 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 334 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
335 reg = CP_PACKET0_GET_REG(header); 335 reg = CP_PACKET0_GET_REG(header);
336 mutex_lock(&p->rdev->ddev->mode_config.mutex);
337 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 336 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
338 if (!obj) { 337 if (!obj) {
339 DRM_ERROR("cannot find crtc %d\n", crtc_id); 338 DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -368,7 +367,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
368 } 367 }
369 } 368 }
370out: 369out:
371 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
372 return r; 370 return r;
373} 371}
374 372
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index e028c1cd9d9b..2330f3a36fd5 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -61,6 +61,11 @@
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6 62# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7 63# define EVERGREEN_GRPH_FORMAT_BGR101111 7
64# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
65# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
66# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
67# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
68# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
64#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c 69#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
65# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) 70# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
66# define EVERGREEN_GRPH_ENDIAN_NONE 0 71# define EVERGREEN_GRPH_ENDIAN_NONE 0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a1cd621780e2..9b7532dd30f7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -165,6 +165,11 @@
165#define SE_DB_BUSY (1 << 30) 165#define SE_DB_BUSY (1 << 30)
166#define SE_CB_BUSY (1 << 31) 166#define SE_CB_BUSY (1 << 31)
167 167
168#define CG_MULT_THERMAL_STATUS 0x740
169#define ASIC_T(x) ((x) << 16)
170#define ASIC_T_MASK 0x7FF0000
171#define ASIC_T_SHIFT 16
172
168#define HDP_HOST_PATH_CNTL 0x2C00 173#define HDP_HOST_PATH_CNTL 0x2C00
169#define HDP_NONSURFACE_BASE 0x2C04 174#define HDP_NONSURFACE_BASE 0x2C04
170#define HDP_NONSURFACE_INFO 0x2C08 175#define HDP_NONSURFACE_INFO 0x2C08
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ab37717a5d39..e115583f84fb 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1230,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1230 header = radeon_get_ib_value(p, h_idx); 1230 header = radeon_get_ib_value(p, h_idx);
1231 crtc_id = radeon_get_ib_value(p, h_idx + 5); 1231 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1232 reg = CP_PACKET0_GET_REG(header); 1232 reg = CP_PACKET0_GET_REG(header);
1233 mutex_lock(&p->rdev->ddev->mode_config.mutex);
1234 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1233 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1235 if (!obj) { 1234 if (!obj) {
1236 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1235 DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1264,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1264 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 1263 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1265 } 1264 }
1266out: 1265out:
1267 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
1268 return r; 1266 return r;
1269} 1267}
1270 1268
@@ -2354,6 +2352,7 @@ void r100_mc_init(struct radeon_device *rdev)
2354 if (rdev->flags & RADEON_IS_IGP) 2352 if (rdev->flags & RADEON_IS_IGP)
2355 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 2353 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2356 radeon_vram_location(rdev, &rdev->mc, base); 2354 radeon_vram_location(rdev, &rdev->mc, base);
2355 rdev->mc.gtt_base_align = 0;
2357 if (!(rdev->flags & RADEON_IS_AGP)) 2356 if (!(rdev->flags & RADEON_IS_AGP))
2358 radeon_gtt_location(rdev, &rdev->mc); 2357 radeon_gtt_location(rdev, &rdev->mc);
2359 radeon_update_bandwidth_info(rdev); 2358 radeon_update_bandwidth_info(rdev);
@@ -2365,11 +2364,10 @@ void r100_mc_init(struct radeon_device *rdev)
2365 */ 2364 */
2366void r100_pll_errata_after_index(struct radeon_device *rdev) 2365void r100_pll_errata_after_index(struct radeon_device *rdev)
2367{ 2366{
2368 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) { 2367 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2369 return; 2368 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2369 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2370 } 2370 }
2371 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2372 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2373} 2371}
2374 2372
2375static void r100_pll_errata_after_data(struct radeon_device *rdev) 2373static void r100_pll_errata_after_data(struct radeon_device *rdev)
@@ -3810,6 +3808,31 @@ void r100_fini(struct radeon_device *rdev)
3810 rdev->bios = NULL; 3808 rdev->bios = NULL;
3811} 3809}
3812 3810
3811/*
3812 * Due to how kexec works, it can leave the hw fully initialised when it
 3813 * boots the new kernel. However, doing our init sequence with the CP and
3814 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
3815 * do some quick sanity checks and restore sane values to avoid this
3816 * problem.
3817 */
3818void r100_restore_sanity(struct radeon_device *rdev)
3819{
3820 u32 tmp;
3821
3822 tmp = RREG32(RADEON_CP_CSQ_CNTL);
3823 if (tmp) {
3824 WREG32(RADEON_CP_CSQ_CNTL, 0);
3825 }
3826 tmp = RREG32(RADEON_CP_RB_CNTL);
3827 if (tmp) {
3828 WREG32(RADEON_CP_RB_CNTL, 0);
3829 }
3830 tmp = RREG32(RADEON_SCRATCH_UMSK);
3831 if (tmp) {
3832 WREG32(RADEON_SCRATCH_UMSK, 0);
3833 }
3834}
3835
3813int r100_init(struct radeon_device *rdev) 3836int r100_init(struct radeon_device *rdev)
3814{ 3837{
3815 int r; 3838 int r;
@@ -3822,6 +3845,8 @@ int r100_init(struct radeon_device *rdev)
3822 radeon_scratch_init(rdev); 3845 radeon_scratch_init(rdev);
3823 /* Initialize surface registers */ 3846 /* Initialize surface registers */
3824 radeon_surface_init(rdev); 3847 radeon_surface_init(rdev);
3848 /* sanity check some register to avoid hangs like after kexec */
3849 r100_restore_sanity(rdev);
3825 /* TODO: disable VGA need to use VGA request */ 3850 /* TODO: disable VGA need to use VGA request */
3826 /* BIOS*/ 3851 /* BIOS*/
3827 if (!radeon_get_bios(rdev)) { 3852 if (!radeon_get_bios(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7e81db5eb804..58eab5d47305 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -481,6 +481,7 @@ void r300_mc_init(struct radeon_device *rdev)
481 if (rdev->flags & RADEON_IS_IGP) 481 if (rdev->flags & RADEON_IS_IGP)
482 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 482 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
483 radeon_vram_location(rdev, &rdev->mc, base); 483 radeon_vram_location(rdev, &rdev->mc, base);
484 rdev->mc.gtt_base_align = 0;
484 if (!(rdev->flags & RADEON_IS_AGP)) 485 if (!(rdev->flags & RADEON_IS_AGP))
485 radeon_gtt_location(rdev, &rdev->mc); 486 radeon_gtt_location(rdev, &rdev->mc);
486 radeon_update_bandwidth_info(rdev); 487 radeon_update_bandwidth_info(rdev);
@@ -1176,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1176 int r; 1177 int r;
1177 1178
1178 track = kzalloc(sizeof(*track), GFP_KERNEL); 1179 track = kzalloc(sizeof(*track), GFP_KERNEL);
1180 if (track == NULL)
1181 return -ENOMEM;
1179 r100_cs_track_clear(p->rdev, track); 1182 r100_cs_track_clear(p->rdev, track);
1180 p->track = track; 1183 p->track = track;
1181 do { 1184 do {
@@ -1377,6 +1380,8 @@ int r300_init(struct radeon_device *rdev)
1377 /* Initialize surface registers */ 1380 /* Initialize surface registers */
1378 radeon_surface_init(rdev); 1381 radeon_surface_init(rdev);
1379 /* TODO: disable VGA need to use VGA request */ 1382 /* TODO: disable VGA need to use VGA request */
1383 /* restore some register to sane defaults */
1384 r100_restore_sanity(rdev);
1380 /* BIOS*/ 1385 /* BIOS*/
1381 if (!radeon_get_bios(rdev)) { 1386 if (!radeon_get_bios(rdev)) {
1382 if (ASIC_IS_AVIVO(rdev)) 1387 if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index e6c89142bb4d..59f7bccc5be0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -343,6 +343,8 @@ int r420_init(struct radeon_device *rdev)
343 /* Initialize surface registers */ 343 /* Initialize surface registers */
344 radeon_surface_init(rdev); 344 radeon_surface_init(rdev);
345 /* TODO: disable VGA need to use VGA request */ 345 /* TODO: disable VGA need to use VGA request */
346 /* restore some register to sane defaults */
347 r100_restore_sanity(rdev);
346 /* BIOS*/ 348 /* BIOS*/
347 if (!radeon_get_bios(rdev)) { 349 if (!radeon_get_bios(rdev)) {
348 if (ASIC_IS_AVIVO(rdev)) 350 if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 93c9a2bbccf8..6ac1f604e29b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -386,6 +386,11 @@
386# define AVIVO_D1GRPH_TILED (1 << 20) 386# define AVIVO_D1GRPH_TILED (1 << 20)
387# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) 387# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
388 388
389# define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
390# define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
391# define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
392# define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
393
389/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2 394/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
390 * block and vice versa. This applies to GRPH, CUR, etc. 395 * block and vice versa. This applies to GRPH, CUR, etc.
391 */ 396 */
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 34330df28483..1458dee902dd 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -125,6 +125,7 @@ void r520_mc_init(struct radeon_device *rdev)
125 r520_vram_get_type(rdev); 125 r520_vram_get_type(rdev);
126 r100_vram_init_sizes(rdev); 126 r100_vram_init_sizes(rdev);
127 radeon_vram_location(rdev, &rdev->mc, 0); 127 radeon_vram_location(rdev, &rdev->mc, 0);
128 rdev->mc.gtt_base_align = 0;
128 if (!(rdev->flags & RADEON_IS_AGP)) 129 if (!(rdev->flags & RADEON_IS_AGP))
129 radeon_gtt_location(rdev, &rdev->mc); 130 radeon_gtt_location(rdev, &rdev->mc);
130 radeon_update_bandwidth_info(rdev); 131 radeon_update_bandwidth_info(rdev);
@@ -230,6 +231,8 @@ int r520_init(struct radeon_device *rdev)
230 radeon_scratch_init(rdev); 231 radeon_scratch_init(rdev);
231 /* Initialize surface registers */ 232 /* Initialize surface registers */
232 radeon_surface_init(rdev); 233 radeon_surface_init(rdev);
234 /* restore some register to sane defaults */
235 r100_restore_sanity(rdev);
233 /* TODO: disable VGA need to use VGA request */ 236 /* TODO: disable VGA need to use VGA request */
234 /* BIOS*/ 237 /* BIOS*/
235 if (!radeon_get_bios(rdev)) { 238 if (!radeon_get_bios(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a73a6e17588d..28e39bc6768b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -92,6 +92,21 @@ void r600_gpu_init(struct radeon_device *rdev);
92void r600_fini(struct radeon_device *rdev); 92void r600_fini(struct radeon_device *rdev);
93void r600_irq_disable(struct radeon_device *rdev); 93void r600_irq_disable(struct radeon_device *rdev);
94 94
95/* get temperature in millidegrees */
96u32 rv6xx_get_temp(struct radeon_device *rdev)
97{
98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
99 ASIC_T_SHIFT;
100 u32 actual_temp = 0;
101
102 if ((temp >> 7) & 1)
103 actual_temp = 0;
104 else
105 actual_temp = (temp >> 1) & 0xff;
106
107 return actual_temp * 1000;
108}
109
95void r600_pm_get_dynpm_state(struct radeon_device *rdev) 110void r600_pm_get_dynpm_state(struct radeon_device *rdev)
96{ 111{
97 int i; 112 int i;
@@ -869,7 +884,17 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
869 u32 tmp; 884 u32 tmp;
870 885
871 /* flush hdp cache so updates hit vram */ 886 /* flush hdp cache so updates hit vram */
872 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 887 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
888 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
889 u32 tmp;
890
891 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
892 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
893 */
894 WREG32(HDP_DEBUG1, 0);
895 tmp = readl((void __iomem *)ptr);
896 } else
897 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
873 898
874 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); 899 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
875 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); 900 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
@@ -1179,6 +1204,7 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1179 if (rdev->flags & RADEON_IS_IGP) 1204 if (rdev->flags & RADEON_IS_IGP)
1180 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; 1205 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
1181 radeon_vram_location(rdev, &rdev->mc, base); 1206 radeon_vram_location(rdev, &rdev->mc, base);
1207 rdev->mc.gtt_base_align = 0;
1182 radeon_gtt_location(rdev, mc); 1208 radeon_gtt_location(rdev, mc);
1183 } 1209 }
1184} 1210}
@@ -1608,7 +1634,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1608 r600_count_pipe_bits((cc_rb_backend_disable & 1634 r600_count_pipe_bits((cc_rb_backend_disable &
1609 R6XX_MAX_BACKENDS_MASK) >> 16)), 1635 R6XX_MAX_BACKENDS_MASK) >> 16)),
1610 (cc_rb_backend_disable >> 16)); 1636 (cc_rb_backend_disable >> 16));
1611 1637 rdev->config.r600.tile_config = tiling_config;
1612 tiling_config |= BACKEND_MAP(backend_map); 1638 tiling_config |= BACKEND_MAP(backend_map);
1613 WREG32(GB_TILING_CONFIG, tiling_config); 1639 WREG32(GB_TILING_CONFIG, tiling_config);
1614 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1640 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -3511,5 +3537,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3511 */ 3537 */
3512void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) 3538void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3513{ 3539{
3514 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 3540 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3541 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
3542 */
3543 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
3544 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
3545 u32 tmp;
3546
3547 WREG32(HDP_DEBUG1, 0);
3548 tmp = readl((void __iomem *)ptr);
3549 } else
3550 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3515} 3551}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 2b26553c352c..b5443fe1c1d1 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -63,7 +63,8 @@ int r600_audio_bits_per_sample(struct radeon_device *rdev)
63 case 0x4: return 32; 63 case 0x4: return 32;
64 } 64 }
65 65
66 DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value); 66 dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
67 (int)value);
67 68
68 return 16; 69 return 16;
69} 70}
@@ -150,7 +151,8 @@ static void r600_audio_update_hdmi(unsigned long param)
150 r600_hdmi_update_audio_settings(encoder); 151 r600_hdmi_update_audio_settings(encoder);
151 } 152 }
152 153
153 if(still_going) r600_audio_schedule_polling(rdev); 154 if (still_going)
155 r600_audio_schedule_polling(rdev);
154} 156}
155 157
156/* 158/*
@@ -158,8 +160,9 @@ static void r600_audio_update_hdmi(unsigned long param)
158 */ 160 */
159static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) 161static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
160{ 162{
161 DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling"); 163 DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
162 WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000); 164 WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
165 rdev->audio_enabled = enable;
163} 166}
164 167
165/* 168/*
@@ -195,12 +198,14 @@ void r600_audio_enable_polling(struct drm_encoder *encoder)
195 struct radeon_device *rdev = dev->dev_private; 198 struct radeon_device *rdev = dev->dev_private;
196 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 199 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
197 200
198 DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active); 201 DRM_DEBUG("r600_audio_enable_polling: %d\n",
202 radeon_encoder->audio_polling_active);
199 if (radeon_encoder->audio_polling_active) 203 if (radeon_encoder->audio_polling_active)
200 return; 204 return;
201 205
202 radeon_encoder->audio_polling_active = 1; 206 radeon_encoder->audio_polling_active = 1;
203 mod_timer(&rdev->audio_timer, jiffies + 1); 207 if (rdev->audio_enabled)
208 mod_timer(&rdev->audio_timer, jiffies + 1);
204} 209}
205 210
206/* 211/*
@@ -209,7 +214,8 @@ void r600_audio_enable_polling(struct drm_encoder *encoder)
209void r600_audio_disable_polling(struct drm_encoder *encoder) 214void r600_audio_disable_polling(struct drm_encoder *encoder)
210{ 215{
211 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 216 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
212 DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active); 217 DRM_DEBUG("r600_audio_disable_polling: %d\n",
218 radeon_encoder->audio_polling_active);
213 radeon_encoder->audio_polling_active = 0; 219 radeon_encoder->audio_polling_active = 0;
214} 220}
215 221
@@ -236,7 +242,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
236 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); 242 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
237 break; 243 break;
238 default: 244 default:
239 DRM_ERROR("Unsupported encoder type 0x%02X\n", 245 dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
240 radeon_encoder->encoder_id); 246 radeon_encoder->encoder_id);
241 return; 247 return;
242 } 248 }
@@ -266,7 +272,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
266 */ 272 */
267void r600_audio_fini(struct radeon_device *rdev) 273void r600_audio_fini(struct radeon_device *rdev)
268{ 274{
269 if (!radeon_audio || !r600_audio_chipset_supported(rdev)) 275 if (!rdev->audio_enabled)
270 return; 276 return;
271 277
272 del_timer(&rdev->audio_timer); 278 del_timer(&rdev->audio_timer);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f4fb88ece2bb..ca5c29f70779 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -538,9 +538,12 @@ int
538r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) 538r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
539{ 539{
540 drm_radeon_private_t *dev_priv = dev->dev_private; 540 drm_radeon_private_t *dev_priv = dev->dev_private;
541 int ret;
541 DRM_DEBUG("\n"); 542 DRM_DEBUG("\n");
542 543
543 r600_nomm_get_vb(dev); 544 ret = r600_nomm_get_vb(dev);
545 if (ret)
546 return ret;
544 547
545 dev_priv->blit_vb->file_priv = file_priv; 548 dev_priv->blit_vb->file_priv = file_priv;
546 549
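The r600_blit_shaders.c changes below mostly annotate the r6xx/r7xx default-state command streams and merge runs of single-register writes into one packet (for example, pairs of 0xc0016900 packets become a single 0xc0026900 packet). For reading those hex words: the headers follow the usual radeon type-3 (PACKET3) layout, with bits 31:30 = 3, bits 29:16 = payload dword count minus one, and bits 15:8 = the opcode (0x69 is SET_CONTEXT_REG and 0x6e SET_SAMPLER, consistent with the /* SET_SAMPLER */ comment in the diff; 0x68 is SET_CONFIG_REG). The sketch below decodes one such header and is illustrative only.

	/* Illustrative decode of a PM4 type-3 packet header word. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t header = 0xc0026900;			/* taken from r6xx_default_state[] below */
		unsigned type   = header >> 30;			/* 3 -> type-3 (PACKET3) */
		unsigned count  = (header >> 16) & 0x3fff;	/* payload dwords minus one */
		unsigned opcode = (header >> 8) & 0xff;		/* 0x69 -> SET_CONTEXT_REG */

		printf("type %u, opcode 0x%02x, %u payload dwords\n", type, opcode, count + 1);
		/* prints: type 3, opcode 0x69, 3 payload dwords -> one register offset
		 * followed by two values, e.g. the DB_STENCIL_CLEAR/DB_DEPTH_CLEAR pair below. */
		return 0;
	}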
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 0271b53fa2dd..e8151c1d55b2 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -39,37 +39,45 @@
39 39
40const u32 r6xx_default_state[] = 40const u32 r6xx_default_state[] =
41{ 41{
42 0xc0002400, 42 0xc0002400, /* START_3D_CMDBUF */
43 0x00000000, 43 0x00000000,
44 0xc0012800, 44
45 0xc0012800, /* CONTEXT_CONTROL */
45 0x80000000, 46 0x80000000,
46 0x80000000, 47 0x80000000,
48
47 0xc0016800, 49 0xc0016800,
48 0x00000010, 50 0x00000010,
49 0x00008000, 51 0x00008000, /* WAIT_UNTIL */
52
50 0xc0016800, 53 0xc0016800,
51 0x00000542, 54 0x00000542,
52 0x07000003, 55 0x07000003, /* TA_CNTL_AUX */
56
53 0xc0016800, 57 0xc0016800,
54 0x000005c5, 58 0x000005c5,
55 0x00000000, 59 0x00000000, /* VC_ENHANCE */
60
56 0xc0016800, 61 0xc0016800,
57 0x00000363, 62 0x00000363,
58 0x00000000, 63 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
64
59 0xc0016800, 65 0xc0016800,
60 0x0000060c, 66 0x0000060c,
61 0x82000000, 67 0x82000000, /* DB_DEBUG */
68
62 0xc0016800, 69 0xc0016800,
63 0x0000060e, 70 0x0000060e,
64 0x01020204, 71 0x01020204, /* DB_WATERMARKS */
65 0xc0016f00, 72
66 0x00000000, 73 0xc0026f00,
67 0x00000000,
68 0xc0016f00,
69 0x00000001,
70 0x00000000, 74 0x00000000,
75 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
76 0x00000000, /* SQ_VTX_START_INST_LOC */
77
71 0xc0096900, 78 0xc0096900,
72 0x0000022a, 79 0x0000022a,
80 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
73 0x00000000, 81 0x00000000,
74 0x00000000, 82 0x00000000,
75 0x00000000, 83 0x00000000,
@@ -78,515 +86,317 @@ const u32 r6xx_default_state[] =
78 0x00000000, 86 0x00000000,
79 0x00000000, 87 0x00000000,
80 0x00000000, 88 0x00000000,
81 0x00000000, 89
82 0xc0016900, 90 0xc0016900,
83 0x00000004, 91 0x00000004,
84 0x00000000, 92 0x00000000, /* DB_DEPTH_INFO */
85 0xc0016900, 93
94 0xc0026900,
86 0x0000000a, 95 0x0000000a,
87 0x00000000, 96 0x00000000, /* DB_STENCIL_CLEAR */
88 0xc0016900, 97 0x00000000, /* DB_DEPTH_CLEAR */
89 0x0000000b, 98
90 0x00000000,
91 0xc0016900,
92 0x0000010c,
93 0x00000000,
94 0xc0016900,
95 0x0000010d,
96 0x00000000,
97 0xc0016900, 99 0xc0016900,
98 0x00000200, 100 0x00000200,
99 0x00000000, 101 0x00000000, /* DB_DEPTH_CONTROL */
100 0xc0016900, 102
103 0xc0026900,
101 0x00000343, 104 0x00000343,
102 0x00000060, 105 0x00000060, /* DB_RENDER_CONTROL */
103 0xc0016900, 106 0x00000040, /* DB_RENDER_OVERRIDE */
104 0x00000344, 107
105 0x00000040,
106 0xc0016900, 108 0xc0016900,
107 0x00000351, 109 0x00000351,
108 0x0000aa00, 110 0x0000aa00, /* DB_ALPHA_TO_MASK */
109 0xc0016900, 111
110 0x00000104, 112 0xc00f6900,
111 0x00000000, 113 0x00000100,
112 0xc0016900, 114 0x00000800, /* VGT_MAX_VTX_INDX */
113 0x0000010e, 115 0x00000000, /* VGT_MIN_VTX_INDX */
114 0x00000000, 116 0x00000000, /* VGT_INDX_OFFSET */
115 0xc0046900, 117 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
116 0x00000105, 118 0x00000000, /* SX_ALPHA_TEST_CONTROL */
117 0x00000000, 119 0x00000000, /* CB_BLEND_RED */
118 0x00000000,
119 0x00000000, 120 0x00000000,
120 0x00000000, 121 0x00000000,
121 0xc0036900,
122 0x00000109,
123 0x00000000, 122 0x00000000,
123 0x00000000, /* CB_FOG_RED */
124 0x00000000, 124 0x00000000,
125 0x00000000, 125 0x00000000,
126 0x00000000, /* DB_STENCILREFMASK */
127 0x00000000, /* DB_STENCILREFMASK_BF */
128 0x00000000, /* SX_ALPHA_REF */
129
126 0xc0046900, 130 0xc0046900,
127 0x0000030c, 131 0x0000030c,
128 0x01000000, 132 0x01000000, /* CB_CLRCMP_CNTL */
129 0x00000000, 133 0x00000000,
130 0x00000000, 134 0x00000000,
131 0x00000000, 135 0x00000000,
136
132 0xc0046900, 137 0xc0046900,
133 0x00000048, 138 0x00000048,
134 0x3f800000, 139 0x3f800000, /* CB_CLEAR_RED */
135 0x00000000, 140 0x00000000,
136 0x3f800000, 141 0x3f800000,
137 0x3f800000, 142 0x3f800000,
138 0xc0016900, 143
139 0x0000008e,
140 0x0000000f,
141 0xc0016900, 144 0xc0016900,
142 0x00000080, 145 0x00000080,
143 0x00000000, 146 0x00000000, /* PA_SC_WINDOW_OFFSET */
144 0xc0016900, 147
148 0xc00a6900,
145 0x00000083, 149 0x00000083,
146 0x0000ffff, 150 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
147 0xc0016900, 151 0x00000000, /* PA_SC_CLIPRECT_0_TL */
148 0x00000084,
149 0x00000000,
150 0xc0016900,
151 0x00000085,
152 0x20002000, 152 0x20002000,
153 0xc0016900,
154 0x00000086,
155 0x00000000, 153 0x00000000,
156 0xc0016900,
157 0x00000087,
158 0x20002000, 154 0x20002000,
159 0xc0016900,
160 0x00000088,
161 0x00000000, 155 0x00000000,
162 0xc0016900,
163 0x00000089,
164 0x20002000, 156 0x20002000,
165 0xc0016900,
166 0x0000008a,
167 0x00000000, 157 0x00000000,
168 0xc0016900,
169 0x0000008b,
170 0x20002000, 158 0x20002000,
171 0xc0016900, 159 0x00000000, /* PA_SC_EDGERULE */
172 0x0000008c, 160
173 0x00000000, 161 0xc0406900,
174 0xc0016900,
175 0x00000094, 162 0x00000094,
176 0x80000000, 163 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
177 0xc0016900, 164 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
178 0x00000095, 165 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
179 0x20002000, 166 0x20002000,
180 0xc0026900,
181 0x000000b4,
182 0x00000000,
183 0x3f800000,
184 0xc0016900,
185 0x00000096,
186 0x80000000, 167 0x80000000,
187 0xc0016900,
188 0x00000097,
189 0x20002000, 168 0x20002000,
190 0xc0026900,
191 0x000000b6,
192 0x00000000,
193 0x3f800000,
194 0xc0016900,
195 0x00000098,
196 0x80000000, 169 0x80000000,
197 0xc0016900,
198 0x00000099,
199 0x20002000, 170 0x20002000,
200 0xc0026900,
201 0x000000b8,
202 0x00000000,
203 0x3f800000,
204 0xc0016900,
205 0x0000009a,
206 0x80000000, 171 0x80000000,
207 0xc0016900,
208 0x0000009b,
209 0x20002000, 172 0x20002000,
210 0xc0026900,
211 0x000000ba,
212 0x00000000,
213 0x3f800000,
214 0xc0016900,
215 0x0000009c,
216 0x80000000, 173 0x80000000,
217 0xc0016900,
218 0x0000009d,
219 0x20002000, 174 0x20002000,
220 0xc0026900,
221 0x000000bc,
222 0x00000000,
223 0x3f800000,
224 0xc0016900,
225 0x0000009e,
226 0x80000000, 175 0x80000000,
227 0xc0016900,
228 0x0000009f,
229 0x20002000, 176 0x20002000,
230 0xc0026900,
231 0x000000be,
232 0x00000000,
233 0x3f800000,
234 0xc0016900,
235 0x000000a0,
236 0x80000000, 177 0x80000000,
237 0xc0016900,
238 0x000000a1,
239 0x20002000, 178 0x20002000,
240 0xc0026900,
241 0x000000c0,
242 0x00000000,
243 0x3f800000,
244 0xc0016900,
245 0x000000a2,
246 0x80000000, 179 0x80000000,
247 0xc0016900,
248 0x000000a3,
249 0x20002000, 180 0x20002000,
250 0xc0026900,
251 0x000000c2,
252 0x00000000,
253 0x3f800000,
254 0xc0016900,
255 0x000000a4,
256 0x80000000, 181 0x80000000,
257 0xc0016900,
258 0x000000a5,
259 0x20002000, 182 0x20002000,
260 0xc0026900,
261 0x000000c4,
262 0x00000000,
263 0x3f800000,
264 0xc0016900,
265 0x000000a6,
266 0x80000000, 183 0x80000000,
267 0xc0016900,
268 0x000000a7,
269 0x20002000, 184 0x20002000,
270 0xc0026900,
271 0x000000c6,
272 0x00000000,
273 0x3f800000,
274 0xc0016900,
275 0x000000a8,
276 0x80000000, 185 0x80000000,
277 0xc0016900,
278 0x000000a9,
279 0x20002000, 186 0x20002000,
280 0xc0026900,
281 0x000000c8,
282 0x00000000,
283 0x3f800000,
284 0xc0016900,
285 0x000000aa,
286 0x80000000, 187 0x80000000,
287 0xc0016900,
288 0x000000ab,
289 0x20002000, 188 0x20002000,
290 0xc0026900,
291 0x000000ca,
292 0x00000000,
293 0x3f800000,
294 0xc0016900,
295 0x000000ac,
296 0x80000000, 189 0x80000000,
297 0xc0016900,
298 0x000000ad,
299 0x20002000, 190 0x20002000,
300 0xc0026900,
301 0x000000cc,
302 0x00000000,
303 0x3f800000,
304 0xc0016900,
305 0x000000ae,
306 0x80000000, 191 0x80000000,
307 0xc0016900,
308 0x000000af,
309 0x20002000, 192 0x20002000,
310 0xc0026900,
311 0x000000ce,
312 0x00000000,
313 0x3f800000,
314 0xc0016900,
315 0x000000b0,
316 0x80000000, 193 0x80000000,
317 0xc0016900,
318 0x000000b1,
319 0x20002000, 194 0x20002000,
320 0xc0026900, 195 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
321 0x000000d0,
322 0x00000000,
323 0x3f800000, 196 0x3f800000,
324 0xc0016900,
325 0x000000b2,
326 0x80000000,
327 0xc0016900,
328 0x000000b3,
329 0x20002000,
330 0xc0026900,
331 0x000000d2,
332 0x00000000, 197 0x00000000,
333 0x3f800000, 198 0x3f800000,
334 0xc0016900,
335 0x00000293,
336 0x00004010,
337 0xc0016900,
338 0x00000300,
339 0x00000000, 199 0x00000000,
340 0xc0016900, 200 0x3f800000,
341 0x00000301,
342 0x00000000,
343 0xc0016900,
344 0x00000312,
345 0xffffffff,
346 0xc0016900,
347 0x00000307,
348 0x00000000, 201 0x00000000,
349 0xc0016900, 202 0x3f800000,
350 0x00000308,
351 0x00000000, 203 0x00000000,
352 0xc0016900, 204 0x3f800000,
353 0x00000283,
354 0x00000000, 205 0x00000000,
355 0xc0016900, 206 0x3f800000,
356 0x00000292,
357 0x00000000, 207 0x00000000,
358 0xc0066900, 208 0x3f800000,
359 0x0000010f,
360 0x00000000, 209 0x00000000,
210 0x3f800000,
361 0x00000000, 211 0x00000000,
212 0x3f800000,
362 0x00000000, 213 0x00000000,
214 0x3f800000,
363 0x00000000, 215 0x00000000,
216 0x3f800000,
364 0x00000000, 217 0x00000000,
218 0x3f800000,
365 0x00000000, 219 0x00000000,
366 0xc0016900, 220 0x3f800000,
367 0x00000206,
368 0x00000000, 221 0x00000000,
369 0xc0016900, 222 0x3f800000,
370 0x00000207,
371 0x00000000, 223 0x00000000,
372 0xc0016900, 224 0x3f800000,
373 0x00000208,
374 0x00000000, 225 0x00000000,
375 0xc0046900,
376 0x00000303,
377 0x3f800000, 226 0x3f800000,
227
228 0xc0026900,
229 0x00000292,
230 0x00000000, /* PA_SC_MPASS_PS_CNTL */
231 0x00004010, /* PA_SC_MODE_CNTL */
232
233 0xc0096900,
234 0x00000300,
235 0x00000000, /* PA_SC_LINE_CNTL */
236 0x00000000, /* PA_SC_AA_CONFIG */
237 0x0000002d, /* PA_SU_VTX_CNTL */
238 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
378 0x3f800000, 239 0x3f800000,
379 0x3f800000, 240 0x3f800000,
380 0x3f800000, 241 0x3f800000,
381 0xc0016900, 242 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
382 0x00000205,
383 0x00000004,
384 0xc0016900,
385 0x00000280,
386 0x00000000,
387 0xc0016900,
388 0x00000281,
389 0x00000000, 243 0x00000000,
244
390 0xc0016900, 245 0xc0016900,
246 0x00000312,
247 0xffffffff, /* PA_SC_AA_MASK */
248
249 0xc0066900,
391 0x0000037e, 250 0x0000037e,
392 0x00000000, 251 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
393 0xc0016900, 252 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
394 0x00000382, 253 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
395 0x00000000, 254 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
396 0xc0016900, 255 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
397 0x00000380, 256 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
398 0x00000000, 257
399 0xc0016900, 258 0xc0046900,
400 0x00000383,
401 0x00000000,
402 0xc0016900,
403 0x00000381,
404 0x00000000,
405 0xc0016900,
406 0x00000282,
407 0x00000008,
408 0xc0016900,
409 0x00000302,
410 0x0000002d,
411 0xc0016900,
412 0x0000037f,
413 0x00000000,
414 0xc0016900,
415 0x000001b2,
416 0x00000000,
417 0xc0016900,
418 0x000001b6, 259 0x000001b6,
419 0x00000000, 260 0x00000000, /* SPI_INPUT_Z */
420 0xc0016900, 261 0x00000000, /* SPI_FOG_CNTL */
421 0x000001b7, 262 0x00000000, /* SPI_FOG_FUNC_SCALE */
422 0x00000000, 263 0x00000000, /* SPI_FOG_FUNC_BIAS */
423 0xc0016900, 264
424 0x000001b8,
425 0x00000000,
426 0xc0016900,
427 0x000001b9,
428 0x00000000,
429 0xc0016900, 265 0xc0016900,
430 0x00000225, 266 0x00000225,
431 0x00000000, 267 0x00000000, /* SQ_PGM_START_FS */
268
432 0xc0016900, 269 0xc0016900,
433 0x00000229, 270 0x00000229,
434 0x00000000, 271 0x00000000, /* SQ_PGM_RESOURCES_FS */
272
435 0xc0016900, 273 0xc0016900,
436 0x00000237, 274 0x00000237,
437 0x00000000, 275 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
438 0xc0016900, 276
439 0x00000100, 277 0xc0026900,
440 0x00000800,
441 0xc0016900,
442 0x00000101,
443 0x00000000,
444 0xc0016900,
445 0x00000102,
446 0x00000000,
447 0xc0016900,
448 0x000002a8, 278 0x000002a8,
449 0x00000000, 279 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
450 0xc0016900, 280 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
451 0x000002a9, 281
452 0x00000000, 282 0xc0116900,
453 0xc0016900, 283 0x00000280,
454 0x00000103, 284 0x00000000, /* PA_SU_POINT_SIZE */
455 0x00000000, 285 0x00000000, /* PA_SU_POINT_MINMAX */
456 0xc0016900, 286 0x00000008, /* PA_SU_LINE_CNTL */
457 0x00000284, 287 0x00000000, /* PA_SC_LINE_STIPPLE */
458 0x00000000, 288 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
459 0xc0016900, 289 0x00000000, /* VGT_HOS_CNTL */
460 0x00000290, 290 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
461 0x00000000, 291 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
462 0xc0016900, 292 0x00000000, /* VGT_HOS_REUSE_DEPTH */
463 0x00000285, 293 0x00000000, /* VGT_GROUP_PRIM_TYPE */
464 0x00000000, 294 0x00000000, /* VGT_GROUP_FIRST_DECR */
465 0xc0016900, 295 0x00000000, /* VGT_GROUP_DECR */
466 0x00000286, 296 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
467 0x00000000, 297 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
468 0xc0016900, 298 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
469 0x00000287, 299 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
470 0x00000000, 300 0x00000000, /* VGT_GS_MODE */
471 0xc0016900, 301
472 0x00000288,
473 0x00000000,
474 0xc0016900,
475 0x00000289,
476 0x00000000,
477 0xc0016900,
478 0x0000028a,
479 0x00000000,
480 0xc0016900,
481 0x0000028b,
482 0x00000000,
483 0xc0016900,
484 0x0000028c,
485 0x00000000,
486 0xc0016900,
487 0x0000028d,
488 0x00000000,
489 0xc0016900,
490 0x0000028e,
491 0x00000000,
492 0xc0016900,
493 0x0000028f,
494 0x00000000,
495 0xc0016900, 302 0xc0016900,
496 0x000002a1, 303 0x000002a1,
497 0x00000000, 304 0x00000000, /* VGT_PRIMITIVEID_EN */
305
498 0xc0016900, 306 0xc0016900,
499 0x000002a5, 307 0x000002a5,
500 0x00000000, 308 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
501 0xc0016900, 309
310 0xc0036900,
502 0x000002ac, 311 0x000002ac,
503 0x00000000, 312 0x00000000, /* VGT_STRMOUT_EN */
504 0xc0016900, 313 0x00000000, /* VGT_REUSE_OFF */
505 0x000002ad, 314 0x00000000, /* VGT_VTX_CNT_EN */
506 0x00000000, 315
507 0xc0016900,
508 0x000002ae,
509 0x00000000,
510 0xc0016900, 316 0xc0016900,
511 0x000002c8, 317 0x000002c8,
512 0x00000000, 318 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
513 0xc0016900, 319
514 0x00000206, 320 0xc0076900,
515 0x00000100,
516 0xc0016900,
517 0x00000204,
518 0x00010000,
519 0xc0036e00,
520 0x00000000,
521 0x00000012,
522 0x00000000,
523 0x00000000,
524 0xc0016900,
525 0x0000008f,
526 0x0000000f,
527 0xc0016900,
528 0x000001e8,
529 0x00000001,
530 0xc0016900,
531 0x00000202, 321 0x00000202,
532 0x00cc0000, 322 0x00cc0000, /* CB_COLOR_CONTROL */
323 0x00000210, /* DB_SHADER_CNTL */
324 0x00010000, /* PA_CL_CLIP_CNTL */
325 0x00000244, /* PA_SU_SC_MODE_CNTL */
326 0x00000100, /* PA_CL_VTE_CNTL */
327 0x00000000, /* PA_CL_VS_OUT_CNTL */
328 0x00000000, /* PA_CL_NANINF_CNTL */
329
330 0xc0026900,
331 0x0000008e,
332 0x0000000f, /* CB_TARGET_MASK */
333 0x0000000f, /* CB_SHADER_MASK */
334
533 0xc0016900, 335 0xc0016900,
534 0x00000205, 336 0x000001e8,
535 0x00000244, 337 0x00000001, /* CB_SHADER_CONTROL */
338
536 0xc0016900, 339 0xc0016900,
537 0x00000203, 340 0x00000185,
538 0x00000210, 341 0x00000000, /* SPI_VS_OUT_ID_0 */
342
539 0xc0016900, 343 0xc0016900,
344 0x00000191,
345 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
346
347 0xc0056900,
540 0x000001b1, 348 0x000001b1,
349 0x00000000, /* SPI_VS_OUT_CONFIG */
350 0x00000000, /* SPI_THREAD_GROUPING */
351 0x00000001, /* SPI_PS_IN_CONTROL_0 */
352 0x00000000, /* SPI_PS_IN_CONTROL_1 */
353 0x00000000, /* SPI_INTERP_CONTROL_0 */
354
355 0xc0036e00, /* SET_SAMPLER */
541 0x00000000, 356 0x00000000,
542 0xc0016900, 357 0x00000012,
543 0x00000185,
544 0x00000000,
545 0xc0016900,
546 0x000001b3,
547 0x00000001,
548 0xc0016900,
549 0x000001b4,
550 0x00000000, 358 0x00000000,
551 0xc0016900,
552 0x00000191,
553 0x00000b00,
554 0xc0016900,
555 0x000001b5,
556 0x00000000, 359 0x00000000,
557}; 360};
558 361
559const u32 r7xx_default_state[] = 362const u32 r7xx_default_state[] =
560{ 363{
561 0xc0012800, 364 0xc0012800, /* CONTEXT_CONTROL */
562 0x80000000, 365 0x80000000,
563 0x80000000, 366 0x80000000,
367
564 0xc0016800, 368 0xc0016800,
565 0x00000010, 369 0x00000010,
566 0x00008000, 370 0x00008000, /* WAIT_UNTIL */
371
567 0xc0016800, 372 0xc0016800,
568 0x00000542, 373 0x00000542,
569 0x07000002, 374 0x07000002, /* TA_CNTL_AUX */
375
570 0xc0016800, 376 0xc0016800,
571 0x000005c5, 377 0x000005c5,
572 0x00000000, 378 0x00000000, /* VC_ENHANCE */
379
573 0xc0016800, 380 0xc0016800,
574 0x00000363, 381 0x00000363,
575 0x00004000, 382 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
383
576 0xc0016800, 384 0xc0016800,
577 0x0000060c, 385 0x0000060c,
578 0x00000000, 386 0x00000000, /* DB_DEBUG */
387
579 0xc0016800, 388 0xc0016800,
580 0x0000060e, 389 0x0000060e,
581 0x00420204, 390 0x00420204, /* DB_WATERMARKS */
582 0xc0016f00, 391
583 0x00000000, 392 0xc0026f00,
584 0x00000000,
585 0xc0016f00,
586 0x00000001,
587 0x00000000, 393 0x00000000,
394 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
395 0x00000000, /* SQ_VTX_START_INST_LOC */
396
588 0xc0096900, 397 0xc0096900,
589 0x0000022a, 398 0x0000022a,
399 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
590 0x00000000, 400 0x00000000,
591 0x00000000, 401 0x00000000,
592 0x00000000, 402 0x00000000,
@@ -595,470 +405,269 @@ const u32 r7xx_default_state[] =
595 0x00000000, 405 0x00000000,
596 0x00000000, 406 0x00000000,
597 0x00000000, 407 0x00000000,
598 0x00000000, 408
599 0xc0016900, 409 0xc0016900,
600 0x00000004, 410 0x00000004,
601 0x00000000, 411 0x00000000, /* DB_DEPTH_INFO */
602 0xc0016900, 412
413 0xc0026900,
603 0x0000000a, 414 0x0000000a,
604 0x00000000, 415 0x00000000, /* DB_STENCIL_CLEAR */
605 0xc0016900, 416 0x00000000, /* DB_DEPTH_CLEAR */
606 0x0000000b, 417
607 0x00000000,
608 0xc0016900,
609 0x0000010c,
610 0x00000000,
611 0xc0016900,
612 0x0000010d,
613 0x00000000,
614 0xc0016900, 418 0xc0016900,
615 0x00000200, 419 0x00000200,
616 0x00000000, 420 0x00000000, /* DB_DEPTH_CONTROL */
617 0xc0016900, 421
422 0xc0026900,
618 0x00000343, 423 0x00000343,
619 0x00000060, 424 0x00000060, /* DB_RENDER_CONTROL */
620 0xc0016900, 425 0x00000000, /* DB_RENDER_OVERRIDE */
621 0x00000344, 426
622 0x00000000,
623 0xc0016900, 427 0xc0016900,
624 0x00000351, 428 0x00000351,
625 0x0000aa00, 429 0x0000aa00, /* DB_ALPHA_TO_MASK */
626 0xc0016900, 430
627 0x00000104, 431 0xc0096900,
628 0x00000000, 432 0x00000100,
629 0xc0016900, 433 0x00000800, /* VGT_MAX_VTX_INDX */
630 0x0000010e, 434 0x00000000, /* VGT_MIN_VTX_INDX */
631 0x00000000, 435 0x00000000, /* VGT_INDX_OFFSET */
632 0xc0046900, 436 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
633 0x00000105, 437 0x00000000, /* SX_ALPHA_TEST_CONTROL */
634 0x00000000, 438 0x00000000, /* CB_BLEND_RED */
635 0x00000000, 439 0x00000000,
636 0x00000000, 440 0x00000000,
637 0x00000000, 441 0x00000000,
442
443 0xc0036900,
444 0x0000010c,
445 0x00000000, /* DB_STENCILREFMASK */
446 0x00000000, /* DB_STENCILREFMASK_BF */
447 0x00000000, /* SX_ALPHA_REF */
448
638 0xc0046900, 449 0xc0046900,
639 0x0000030c, 450 0x0000030c, /* CB_CLRCMP_CNTL */
640 0x01000000, 451 0x01000000,
641 0x00000000, 452 0x00000000,
642 0x00000000, 453 0x00000000,
643 0x00000000, 454 0x00000000,
644 0xc0016900, 455
645 0x0000008e,
646 0x0000000f,
647 0xc0016900, 456 0xc0016900,
648 0x00000080, 457 0x00000080,
649 0x00000000, 458 0x00000000, /* PA_SC_WINDOW_OFFSET */
650 0xc0016900, 459
460 0xc00a6900,
651 0x00000083, 461 0x00000083,
652 0x0000ffff, 462 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
653 0xc0016900, 463 0x00000000, /* PA_SC_CLIPRECT_0_TL */
654 0x00000084,
655 0x00000000,
656 0xc0016900,
657 0x00000085,
658 0x20002000, 464 0x20002000,
659 0xc0016900,
660 0x00000086,
661 0x00000000, 465 0x00000000,
662 0xc0016900,
663 0x00000087,
664 0x20002000, 466 0x20002000,
665 0xc0016900,
666 0x00000088,
667 0x00000000, 467 0x00000000,
668 0xc0016900,
669 0x00000089,
670 0x20002000, 468 0x20002000,
671 0xc0016900,
672 0x0000008a,
673 0x00000000, 469 0x00000000,
674 0xc0016900,
675 0x0000008b,
676 0x20002000, 470 0x20002000,
677 0xc0016900, 471 0xaaaaaaaa, /* PA_SC_EDGERULE */
678 0x0000008c, 472
679 0xaaaaaaaa, 473 0xc0406900,
680 0xc0016900,
681 0x00000094, 474 0x00000094,
682 0x80000000, 475 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
683 0xc0016900, 476 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
684 0x00000095, 477 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
685 0x20002000, 478 0x20002000,
686 0xc0026900,
687 0x000000b4,
688 0x00000000,
689 0x3f800000,
690 0xc0016900,
691 0x00000096,
692 0x80000000, 479 0x80000000,
693 0xc0016900,
694 0x00000097,
695 0x20002000, 480 0x20002000,
696 0xc0026900,
697 0x000000b6,
698 0x00000000,
699 0x3f800000,
700 0xc0016900,
701 0x00000098,
702 0x80000000, 481 0x80000000,
703 0xc0016900,
704 0x00000099,
705 0x20002000, 482 0x20002000,
706 0xc0026900,
707 0x000000b8,
708 0x00000000,
709 0x3f800000,
710 0xc0016900,
711 0x0000009a,
712 0x80000000, 483 0x80000000,
713 0xc0016900,
714 0x0000009b,
715 0x20002000, 484 0x20002000,
716 0xc0026900,
717 0x000000ba,
718 0x00000000,
719 0x3f800000,
720 0xc0016900,
721 0x0000009c,
722 0x80000000, 485 0x80000000,
723 0xc0016900,
724 0x0000009d,
725 0x20002000, 486 0x20002000,
726 0xc0026900,
727 0x000000bc,
728 0x00000000,
729 0x3f800000,
730 0xc0016900,
731 0x0000009e,
732 0x80000000, 487 0x80000000,
733 0xc0016900,
734 0x0000009f,
735 0x20002000, 488 0x20002000,
736 0xc0026900,
737 0x000000be,
738 0x00000000,
739 0x3f800000,
740 0xc0016900,
741 0x000000a0,
742 0x80000000, 489 0x80000000,
743 0xc0016900,
744 0x000000a1,
745 0x20002000, 490 0x20002000,
746 0xc0026900,
747 0x000000c0,
748 0x00000000,
749 0x3f800000,
750 0xc0016900,
751 0x000000a2,
752 0x80000000, 491 0x80000000,
753 0xc0016900,
754 0x000000a3,
755 0x20002000, 492 0x20002000,
756 0xc0026900,
757 0x000000c2,
758 0x00000000,
759 0x3f800000,
760 0xc0016900,
761 0x000000a4,
762 0x80000000, 493 0x80000000,
763 0xc0016900,
764 0x000000a5,
765 0x20002000, 494 0x20002000,
766 0xc0026900,
767 0x000000c4,
768 0x00000000,
769 0x3f800000,
770 0xc0016900,
771 0x000000a6,
772 0x80000000, 495 0x80000000,
773 0xc0016900,
774 0x000000a7,
775 0x20002000, 496 0x20002000,
776 0xc0026900,
777 0x000000c6,
778 0x00000000,
779 0x3f800000,
780 0xc0016900,
781 0x000000a8,
782 0x80000000, 497 0x80000000,
783 0xc0016900,
784 0x000000a9,
785 0x20002000, 498 0x20002000,
786 0xc0026900,
787 0x000000c8,
788 0x00000000,
789 0x3f800000,
790 0xc0016900,
791 0x000000aa,
792 0x80000000, 499 0x80000000,
793 0xc0016900,
794 0x000000ab,
795 0x20002000, 500 0x20002000,
796 0xc0026900,
797 0x000000ca,
798 0x00000000,
799 0x3f800000,
800 0xc0016900,
801 0x000000ac,
802 0x80000000, 501 0x80000000,
803 0xc0016900,
804 0x000000ad,
805 0x20002000, 502 0x20002000,
806 0xc0026900,
807 0x000000cc,
808 0x00000000,
809 0x3f800000,
810 0xc0016900,
811 0x000000ae,
812 0x80000000, 503 0x80000000,
813 0xc0016900,
814 0x000000af,
815 0x20002000, 504 0x20002000,
816 0xc0026900,
817 0x000000ce,
818 0x00000000,
819 0x3f800000,
820 0xc0016900,
821 0x000000b0,
822 0x80000000, 505 0x80000000,
823 0xc0016900,
824 0x000000b1,
825 0x20002000, 506 0x20002000,
826 0xc0026900, 507 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
827 0x000000d0,
828 0x00000000,
829 0x3f800000, 508 0x3f800000,
830 0xc0016900,
831 0x000000b2,
832 0x80000000,
833 0xc0016900,
834 0x000000b3,
835 0x20002000,
836 0xc0026900,
837 0x000000d2,
838 0x00000000, 509 0x00000000,
839 0x3f800000, 510 0x3f800000,
840 0xc0016900,
841 0x00000293,
842 0x00514000,
843 0xc0016900,
844 0x00000300,
845 0x00000000,
846 0xc0016900,
847 0x00000301,
848 0x00000000, 511 0x00000000,
849 0xc0016900, 512 0x3f800000,
850 0x00000312,
851 0xffffffff,
852 0xc0016900,
853 0x00000307,
854 0x00000000, 513 0x00000000,
855 0xc0016900, 514 0x3f800000,
856 0x00000308,
857 0x00000000, 515 0x00000000,
858 0xc0016900, 516 0x3f800000,
859 0x00000283,
860 0x00000000, 517 0x00000000,
861 0xc0016900, 518 0x3f800000,
862 0x00000292,
863 0x00000000, 519 0x00000000,
864 0xc0066900, 520 0x3f800000,
865 0x0000010f,
866 0x00000000, 521 0x00000000,
522 0x3f800000,
867 0x00000000, 523 0x00000000,
524 0x3f800000,
868 0x00000000, 525 0x00000000,
526 0x3f800000,
869 0x00000000, 527 0x00000000,
528 0x3f800000,
870 0x00000000, 529 0x00000000,
530 0x3f800000,
871 0x00000000, 531 0x00000000,
872 0xc0016900, 532 0x3f800000,
873 0x00000206,
874 0x00000000, 533 0x00000000,
875 0xc0016900, 534 0x3f800000,
876 0x00000207,
877 0x00000000, 535 0x00000000,
878 0xc0016900, 536 0x3f800000,
879 0x00000208,
880 0x00000000, 537 0x00000000,
881 0xc0046900,
882 0x00000303,
883 0x3f800000, 538 0x3f800000,
539
540 0xc0026900,
541 0x00000292,
542 0x00000000, /* PA_SC_MPASS_PS_CNTL */
543 0x00514000, /* PA_SC_MODE_CNTL */
544
545 0xc0096900,
546 0x00000300,
547 0x00000000, /* PA_SC_LINE_CNTL */
548 0x00000000, /* PA_SC_AA_CONFIG */
549 0x0000002d, /* PA_SU_VTX_CNTL */
550 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
884 0x3f800000, 551 0x3f800000,
885 0x3f800000, 552 0x3f800000,
886 0x3f800000, 553 0x3f800000,
887 0xc0016900, 554 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
888 0x00000205,
889 0x00000004,
890 0xc0016900,
891 0x00000280,
892 0x00000000,
893 0xc0016900,
894 0x00000281,
895 0x00000000, 555 0x00000000,
556
896 0xc0016900, 557 0xc0016900,
558 0x00000312,
559 0xffffffff, /* PA_SC_AA_MASK */
560
561 0xc0066900,
897 0x0000037e, 562 0x0000037e,
898 0x00000000, 563 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
899 0xc0016900, 564 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
900 0x00000382, 565 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
901 0x00000000, 566 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
902 0xc0016900, 567 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
903 0x00000380, 568 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
904 0x00000000, 569
905 0xc0016900, 570 0xc0046900,
906 0x00000383,
907 0x00000000,
908 0xc0016900,
909 0x00000381,
910 0x00000000,
911 0xc0016900,
912 0x00000282,
913 0x00000008,
914 0xc0016900,
915 0x00000302,
916 0x0000002d,
917 0xc0016900,
918 0x0000037f,
919 0x00000000,
920 0xc0016900,
921 0x000001b2,
922 0x00000001,
923 0xc0016900,
924 0x000001b6, 571 0x000001b6,
925 0x00000000, 572 0x00000000, /* SPI_INPUT_Z */
926 0xc0016900, 573 0x00000000, /* SPI_FOG_CNTL */
927 0x000001b7, 574 0x00000000, /* SPI_FOG_FUNC_SCALE */
928 0x00000000, 575 0x00000000, /* SPI_FOG_FUNC_BIAS */
929 0xc0016900, 576
930 0x000001b8,
931 0x00000000,
932 0xc0016900,
933 0x000001b9,
934 0x00000000,
935 0xc0016900, 577 0xc0016900,
936 0x00000225, 578 0x00000225,
937 0x00000000, 579 0x00000000, /* SQ_PGM_START_FS */
580
938 0xc0016900, 581 0xc0016900,
939 0x00000229, 582 0x00000229,
940 0x00000000, 583 0x00000000, /* SQ_PGM_RESOURCES_FS */
584
941 0xc0016900, 585 0xc0016900,
942 0x00000237, 586 0x00000237,
943 0x00000000, 587 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
944 0xc0016900, 588
945 0x00000100, 589 0xc0026900,
946 0x00000800,
947 0xc0016900,
948 0x00000101,
949 0x00000000,
950 0xc0016900,
951 0x00000102,
952 0x00000000,
953 0xc0016900,
954 0x000002a8, 590 0x000002a8,
955 0x00000000, 591 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
956 0xc0016900, 592 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
957 0x000002a9, 593
958 0x00000000, 594 0xc0116900,
959 0xc0016900, 595 0x00000280,
960 0x00000103, 596 0x00000000, /* PA_SU_POINT_SIZE */
961 0x00000000, 597 0x00000000, /* PA_SU_POINT_MINMAX */
962 0xc0016900, 598 0x00000008, /* PA_SU_LINE_CNTL */
963 0x00000284, 599 0x00000000, /* PA_SC_LINE_STIPPLE */
964 0x00000000, 600 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
965 0xc0016900, 601 0x00000000, /* VGT_HOS_CNTL */
966 0x00000290, 602 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
967 0x00000000, 603 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
968 0xc0016900, 604 0x00000000, /* VGT_HOS_REUSE_DEPTH */
969 0x00000285, 605 0x00000000, /* VGT_GROUP_PRIM_TYPE */
970 0x00000000, 606 0x00000000, /* VGT_GROUP_FIRST_DECR */
971 0xc0016900, 607 0x00000000, /* VGT_GROUP_DECR */
972 0x00000286, 608 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
973 0x00000000, 609 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
974 0xc0016900, 610 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
975 0x00000287, 611 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
976 0x00000000, 612 0x00000000, /* VGT_GS_MODE */
977 0xc0016900, 613
978 0x00000288,
979 0x00000000,
980 0xc0016900,
981 0x00000289,
982 0x00000000,
983 0xc0016900,
984 0x0000028a,
985 0x00000000,
986 0xc0016900,
987 0x0000028b,
988 0x00000000,
989 0xc0016900,
990 0x0000028c,
991 0x00000000,
992 0xc0016900,
993 0x0000028d,
994 0x00000000,
995 0xc0016900,
996 0x0000028e,
997 0x00000000,
998 0xc0016900,
999 0x0000028f,
1000 0x00000000,
1001 0xc0016900, 614 0xc0016900,
1002 0x000002a1, 615 0x000002a1,
1003 0x00000000, 616 0x00000000, /* VGT_PRIMITIVEID_EN */
617
1004 0xc0016900, 618 0xc0016900,
1005 0x000002a5, 619 0x000002a5,
1006 0x00000000, 620 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
1007 0xc0016900, 621
622 0xc0036900,
1008 0x000002ac, 623 0x000002ac,
1009 0x00000000, 624 0x00000000, /* VGT_STRMOUT_EN */
1010 0xc0016900, 625 0x00000000, /* VGT_REUSE_OFF */
1011 0x000002ad, 626 0x00000000, /* VGT_VTX_CNT_EN */
1012 0x00000000, 627
1013 0xc0016900,
1014 0x000002ae,
1015 0x00000000,
1016 0xc0016900, 628 0xc0016900,
1017 0x000002c8, 629 0x000002c8,
1018 0x00000000, 630 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
1019 0xc0016900, 631
1020 0x00000206, 632 0xc0076900,
1021 0x00000100,
1022 0xc0016900,
1023 0x00000204,
1024 0x00010000,
1025 0xc0036e00,
1026 0x00000000,
1027 0x00000012,
1028 0x00000000,
1029 0x00000000,
1030 0xc0016900,
1031 0x0000008f,
1032 0x0000000f,
1033 0xc0016900,
1034 0x000001e8,
1035 0x00000001,
1036 0xc0016900,
1037 0x00000202, 633 0x00000202,
1038 0x00cc0000, 634 0x00cc0000, /* CB_COLOR_CONTROL */
635 0x00000210, /* DB_SHADER_CNTL */
636 0x00010000, /* PA_CL_CLIP_CNTL */
637 0x00000244, /* PA_SU_SC_MODE_CNTL */
638 0x00000100, /* PA_CL_VTE_CNTL */
639 0x00000000, /* PA_CL_VS_OUT_CNTL */
640 0x00000000, /* PA_CL_NANINF_CNTL */
641
642 0xc0026900,
643 0x0000008e,
644 0x0000000f, /* CB_TARGET_MASK */
645 0x0000000f, /* CB_SHADER_MASK */
646
1039 0xc0016900, 647 0xc0016900,
1040 0x00000205, 648 0x000001e8,
1041 0x00000244, 649 0x00000001, /* CB_SHADER_CONTROL */
650
1042 0xc0016900, 651 0xc0016900,
1043 0x00000203, 652 0x00000185,
1044 0x00000210, 653 0x00000000, /* SPI_VS_OUT_ID_0 */
654
1045 0xc0016900, 655 0xc0016900,
656 0x00000191,
657 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
658
659 0xc0056900,
1046 0x000001b1, 660 0x000001b1,
661 0x00000000, /* SPI_VS_OUT_CONFIG */
662 0x00000001, /* SPI_THREAD_GROUPING */
663 0x00000001, /* SPI_PS_IN_CONTROL_0 */
664 0x00000000, /* SPI_PS_IN_CONTROL_1 */
665 0x00000000, /* SPI_INTERP_CONTROL_0 */
666
667 0xc0036e00, /* SET_SAMPLER */
1047 0x00000000, 668 0x00000000,
1048 0xc0016900, 669 0x00000012,
1049 0x00000185,
1050 0x00000000,
1051 0xc0016900,
1052 0x000001b3,
1053 0x00000001,
1054 0xc0016900,
1055 0x000001b4,
1056 0x00000000, 670 0x00000000,
1057 0xc0016900,
1058 0x00000191,
1059 0x00000b00,
1060 0xc0016900,
1061 0x000001b5,
1062 0x00000000, 671 0x00000000,
1063}; 672};
1064 673
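
The dwords in the state tables above are PM4 type-3 SET_CONTEXT_REG packets; the rewrite folds runs of single-register writes into multi-register packets, which is why the r7xx table shrinks from 1064 to 673 entries. A minimal decoder sketch, assuming the usual r6xx/r7xx PM4 encoding (opcode 0x69, context register block starting at 0x28000); it is illustrative only and not part of the patch:

#include <stdio.h>

/* Type-3 header: bits [31:30] = 3, [15:8] = opcode; for SET_CONTEXT_REG
 * (opcode 0x69) the 14-bit count at [29:16] is the number of register
 * values that follow the one-dword register index. */
#define PACKET3(op, count)  ((3u << 30) | (((count) & 0x3fffu) << 16) | (((op) & 0xffu) << 8))
#define PACKET3_SET_CONTEXT_REG        0x69
#define PACKET3_SET_CONTEXT_REG_OFFSET 0x00028000u

int main(void)
{
        /* one register per packet, as in the old table */
        printf("0x%08x\n", PACKET3(PACKET3_SET_CONTEXT_REG, 1));   /* 0xc0016900 */
        /* two consecutive registers folded into one packet, as in the new table */
        printf("0x%08x\n", PACKET3(PACKET3_SET_CONTEXT_REG, 2));   /* 0xc0026900 */
        /* register index following the header: (0x28010 - 0x28000) >> 2, i.e. DB_DEPTH_INFO */
        printf("0x%08x\n", (0x28010u - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        return 0;
}
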
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c39c1bc13016..c3ea212e0c3c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -25,6 +25,7 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/kernel.h>
28#include "drmP.h" 29#include "drmP.h"
29#include "radeon.h" 30#include "radeon.h"
30#include "r600d.h" 31#include "r600d.h"
@@ -166,7 +167,7 @@ static void r600_cs_track_init(struct r600_cs_track *track)
166static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 167static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
167{ 168{
168 struct r600_cs_track *track = p->track; 169 struct r600_cs_track *track = p->track;
169 u32 bpe = 0, pitch, slice_tile_max, size, tmp, height; 170 u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
170 volatile u32 *ib = p->ib->ptr; 171 volatile u32 *ib = p->ib->ptr;
171 172
172 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { 173 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
@@ -180,56 +181,57 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
180 i, track->cb_color_info[i]); 181 i, track->cb_color_info[i]);
181 return -EINVAL; 182 return -EINVAL;
182 } 183 }
183 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3; 184 /* pitch is the number of 8x8 tiles per row */
185 pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
184 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 186 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
185 if (!pitch) { 187 height = size / (pitch * 8 * bpe);
186 dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
187 __func__, __LINE__, pitch, i, track->cb_color_size[i]);
188 return -EINVAL;
189 }
190 height = size / (pitch * bpe);
191 if (height > 8192) 188 if (height > 8192)
192 height = 8192; 189 height = 8192;
190 if (height > 7)
191 height &= ~0x7;
193 switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) { 192 switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
194 case V_0280A0_ARRAY_LINEAR_GENERAL: 193 case V_0280A0_ARRAY_LINEAR_GENERAL:
194 /* technically height & 0x7 */
195 break;
195 case V_0280A0_ARRAY_LINEAR_ALIGNED: 196 case V_0280A0_ARRAY_LINEAR_ALIGNED:
196 if (pitch & 0x3f) { 197 pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
197 dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n", 198 if (!IS_ALIGNED(pitch, pitch_align)) {
198 __func__, __LINE__, pitch, bpe, pitch * bpe); 199 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
200 __func__, __LINE__, pitch);
199 return -EINVAL; 201 return -EINVAL;
200 } 202 }
201 if ((pitch * bpe) & (track->group_size - 1)) { 203 if (!IS_ALIGNED(height, 8)) {
202 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 204 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
203 __func__, __LINE__, pitch); 205 __func__, __LINE__, height);
204 return -EINVAL; 206 return -EINVAL;
205 } 207 }
206 break; 208 break;
207 case V_0280A0_ARRAY_1D_TILED_THIN1: 209 case V_0280A0_ARRAY_1D_TILED_THIN1:
208 if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) { 210 pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
211 if (!IS_ALIGNED(pitch, pitch_align)) {
209 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 212 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
210 __func__, __LINE__, pitch); 213 __func__, __LINE__, pitch);
214 return -EINVAL;
215 }
216 if (!IS_ALIGNED(height, 8)) {
217 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
218 __func__, __LINE__, height);
211 return -EINVAL; 219 return -EINVAL;
212 } 220 }
213 height &= ~0x7;
214 if (!height)
215 height = 8;
216 break; 221 break;
217 case V_0280A0_ARRAY_2D_TILED_THIN1: 222 case V_0280A0_ARRAY_2D_TILED_THIN1:
218 if (pitch & ((8 * track->nbanks) - 1)) { 223 pitch_align = max((u32)track->nbanks,
224 (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
225 if (!IS_ALIGNED(pitch, pitch_align)) {
219 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 226 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
220 __func__, __LINE__, pitch); 227 __func__, __LINE__, pitch);
221 return -EINVAL; 228 return -EINVAL;
222 } 229 }
223 tmp = pitch * 8 * bpe * track->nsamples; 230 if (!IS_ALIGNED((height / 8), track->nbanks)) {
224 tmp = tmp / track->nbanks; 231 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
225 if (tmp & (track->group_size - 1)) { 232 __func__, __LINE__, height);
226 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
227 __func__, __LINE__, pitch);
228 return -EINVAL; 233 return -EINVAL;
229 } 234 }
230 height &= ~((16 * track->npipes) - 1);
231 if (!height)
232 height = 16 * track->npipes;
233 break; 235 break;
234 default: 236 default:
235 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 237 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -238,16 +240,20 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
238 return -EINVAL; 240 return -EINVAL;
239 } 241 }
240 /* check offset */ 242 /* check offset */
241 tmp = height * pitch; 243 tmp = height * pitch * 8 * bpe;
242 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 244 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
243 dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]); 245 dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
246 return -EINVAL;
247 }
248 if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
249 dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
244 return -EINVAL; 250 return -EINVAL;
245 } 251 }
246 /* limit max tile */ 252 /* limit max tile */
247 tmp = (height * pitch) >> 6; 253 tmp = (height * pitch * 8) >> 6;
248 if (tmp < slice_tile_max) 254 if (tmp < slice_tile_max)
249 slice_tile_max = tmp; 255 slice_tile_max = tmp;
250 tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) | 256 tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
251 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 257 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
252 ib[track->cb_color_size_idx[i]] = tmp; 258 ib[track->cb_color_size_idx[i]] = tmp;
253 return 0; 259 return 0;
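
Worked restatement of the colour-buffer pitch rule the new checks enforce, with the pitch kept in 8-pixel tiles as in the hunk above. This is a standalone sketch; the group size, bank count, sample count and bpe are illustrative values, not taken from any particular board:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static unsigned cb_pitch_align(unsigned array_mode, unsigned bpe,
                               unsigned nsamples, unsigned group_size,
                               unsigned nbanks)
{
        switch (array_mode) {
        case 1: /* LINEAR_ALIGNED */
                return MAX(64u, group_size / bpe) / 8;
        case 2: /* 1D_TILED_THIN1 */
                return MAX(8u, group_size / (8 * bpe * nsamples)) / 8;
        case 4: /* 2D_TILED_THIN1 */
                return MAX(nbanks, ((group_size / 8) / (bpe * nsamples)) * nbanks);
        default: /* LINEAR_GENERAL: no pitch requirement */
                return 1;
        }
}

int main(void)
{
        unsigned pitch = 32;    /* 32 tiles = 256 pixels */

        /* example: 32bpp, 1 sample, 256-byte groups, 4 banks */
        printf("1D align %u tiles, pitch ok: %d\n",
               cb_pitch_align(2, 4, 1, 256, 4),
               IS_ALIGNED(pitch, cb_pitch_align(2, 4, 1, 256, 4)));
        printf("2D align %u tiles, pitch ok: %d\n",
               cb_pitch_align(4, 4, 1, 256, 4),
               IS_ALIGNED(pitch, cb_pitch_align(4, 4, 1, 256, 4)));
        return 0;
}

The same pattern, minus the multisample term, is applied to the depth-buffer and texture paths further down in the patch.
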
@@ -289,7 +295,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
289 /* Check depth buffer */ 295 /* Check depth buffer */
290 if (G_028800_STENCIL_ENABLE(track->db_depth_control) || 296 if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
291 G_028800_Z_ENABLE(track->db_depth_control)) { 297 G_028800_Z_ENABLE(track->db_depth_control)) {
292 u32 nviews, bpe, ntiles; 298 u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
293 if (track->db_bo == NULL) { 299 if (track->db_bo == NULL) {
294 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 300 dev_warn(p->dev, "z/stencil with no depth buffer\n");
295 return -EINVAL; 301 return -EINVAL;
@@ -332,6 +338,51 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
332 } 338 }
333 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 339 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
334 } else { 340 } else {
341 size = radeon_bo_size(track->db_bo);
342 pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
343 height = size / (pitch * 8 * bpe);
344 height &= ~0x7;
345 if (!height)
346 height = 8;
347
348 switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
349 case V_028010_ARRAY_1D_TILED_THIN1:
350 pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
351 if (!IS_ALIGNED(pitch, pitch_align)) {
352 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
353 __func__, __LINE__, pitch);
354 return -EINVAL;
355 }
356 if (!IS_ALIGNED(height, 8)) {
357 dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
358 __func__, __LINE__, height);
359 return -EINVAL;
360 }
361 break;
362 case V_028010_ARRAY_2D_TILED_THIN1:
363 pitch_align = max((u32)track->nbanks,
364 (u32)(((track->group_size / 8) / bpe) * track->nbanks));
365 if (!IS_ALIGNED(pitch, pitch_align)) {
366 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
367 __func__, __LINE__, pitch);
368 return -EINVAL;
369 }
370 if ((height / 8) & (track->nbanks - 1)) {
371 dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
372 __func__, __LINE__, height);
373 return -EINVAL;
374 }
375 break;
376 default:
377 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
378 G_028010_ARRAY_MODE(track->db_depth_info),
379 track->db_depth_info);
380 return -EINVAL;
381 }
382 if (!IS_ALIGNED(track->db_offset, track->group_size)) {
383 dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
384 return -EINVAL;
385 }
335 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 386 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
336 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 387 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
337 tmp = ntiles * bpe * 64 * nviews; 388 tmp = ntiles * bpe * 64 * nviews;
@@ -585,7 +636,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
585 header = radeon_get_ib_value(p, h_idx); 636 header = radeon_get_ib_value(p, h_idx);
586 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 637 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
587 reg = CP_PACKET0_GET_REG(header); 638 reg = CP_PACKET0_GET_REG(header);
588 mutex_lock(&p->rdev->ddev->mode_config.mutex); 639
589 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 640 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
590 if (!obj) { 641 if (!obj) {
591 DRM_ERROR("cannot find crtc %d\n", crtc_id); 642 DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -620,7 +671,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
620 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 671 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
621 } 672 }
622out: 673out:
623 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
624 return r; 674 return r;
625} 675}
626 676
@@ -725,7 +775,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
725 track->db_depth_control = radeon_get_ib_value(p, idx); 775 track->db_depth_control = radeon_get_ib_value(p, idx);
726 break; 776 break;
727 case R_028010_DB_DEPTH_INFO: 777 case R_028010_DB_DEPTH_INFO:
728 track->db_depth_info = radeon_get_ib_value(p, idx); 778 if (r600_cs_packet_next_is_pkt3_nop(p)) {
779 r = r600_cs_packet_next_reloc(p, &reloc);
780 if (r) {
781 dev_warn(p->dev, "bad SET_CONTEXT_REG "
782 "0x%04X\n", reg);
783 return -EINVAL;
784 }
785 track->db_depth_info = radeon_get_ib_value(p, idx);
786 ib[idx] &= C_028010_ARRAY_MODE;
787 track->db_depth_info &= C_028010_ARRAY_MODE;
788 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
789 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
790 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
791 } else {
792 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
793 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
794 }
795 } else
796 track->db_depth_info = radeon_get_ib_value(p, idx);
729 break; 797 break;
730 case R_028004_DB_DEPTH_VIEW: 798 case R_028004_DB_DEPTH_VIEW:
731 track->db_depth_view = radeon_get_ib_value(p, idx); 799 track->db_depth_view = radeon_get_ib_value(p, idx);
@@ -758,8 +826,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
758 case R_0280B4_CB_COLOR5_INFO: 826 case R_0280B4_CB_COLOR5_INFO:
759 case R_0280B8_CB_COLOR6_INFO: 827 case R_0280B8_CB_COLOR6_INFO:
760 case R_0280BC_CB_COLOR7_INFO: 828 case R_0280BC_CB_COLOR7_INFO:
761 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 829 if (r600_cs_packet_next_is_pkt3_nop(p)) {
762 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 830 r = r600_cs_packet_next_reloc(p, &reloc);
831 if (r) {
832 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
833 return -EINVAL;
834 }
835 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
836 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
837 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
838 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
839 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
840 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
841 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
842 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
843 }
844 } else {
845 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
846 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
847 }
763 break; 848 break;
764 case R_028060_CB_COLOR0_SIZE: 849 case R_028060_CB_COLOR0_SIZE:
765 case R_028064_CB_COLOR1_SIZE: 850 case R_028064_CB_COLOR1_SIZE:
@@ -947,8 +1032,9 @@ static inline unsigned minify(unsigned size, unsigned levels)
947} 1032}
948 1033
949static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels, 1034static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
950 unsigned w0, unsigned h0, unsigned d0, unsigned bpe, 1035 unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
951 unsigned *l0_size, unsigned *mipmap_size) 1036 unsigned pitch_align,
1037 unsigned *l0_size, unsigned *mipmap_size)
952{ 1038{
953 unsigned offset, i, level, face; 1039 unsigned offset, i, level, face;
954 unsigned width, height, depth, rowstride, size; 1040 unsigned width, height, depth, rowstride, size;
@@ -961,13 +1047,13 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
961 height = minify(h0, i); 1047 height = minify(h0, i);
962 depth = minify(d0, i); 1048 depth = minify(d0, i);
963 for(face = 0; face < nfaces; face++) { 1049 for(face = 0; face < nfaces; face++) {
964 rowstride = ((width * bpe) + 255) & ~255; 1050 rowstride = ALIGN((width * bpe), pitch_align);
965 size = height * rowstride * depth; 1051 size = height * rowstride * depth;
966 offset += size; 1052 offset += size;
967 offset = (offset + 0x1f) & ~0x1f; 1053 offset = (offset + 0x1f) & ~0x1f;
968 } 1054 }
969 } 1055 }
970 *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0; 1056 *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
971 *mipmap_size = offset; 1057 *mipmap_size = offset;
972 if (!blevel) 1058 if (!blevel)
973 *mipmap_size -= *l0_size; 1059 *mipmap_size -= *l0_size;
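
The mipmap sizing change above swaps the hard-coded 256-byte row padding for the caller-supplied pitch alignment. A self-contained sketch of the per-level arithmetic; the width, height, bpe and alignment values are made up for illustration:

#include <stdio.h>

/* a must be a power of two, as in the kernel ALIGN() helper */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned tex_level_size(unsigned width, unsigned height,
                               unsigned bpe, unsigned pitch_align_bytes)
{
        /* rowstride = ALIGN(width * bpe, pitch_align), as in the new code */
        return ALIGN(width * bpe, pitch_align_bytes) * height;
}

int main(void)
{
        /* a 300x64 RGBA8 level: 1200-byte rows padded to 256 vs 1024 bytes */
        printf("%u vs %u\n",
               tex_level_size(300, 64, 4, 256),
               tex_level_size(300, 64, 4, 1024));
        return 0;
}
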
@@ -986,16 +1072,23 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
986 * the texture and mipmap bo object are big enough to cover this resource. 1072 * the texture and mipmap bo object are big enough to cover this resource.
987 */ 1073 */
988static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1074static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
989 struct radeon_bo *texture, 1075 struct radeon_bo *texture,
990 struct radeon_bo *mipmap) 1076 struct radeon_bo *mipmap,
1077 u32 tiling_flags)
991{ 1078{
1079 struct r600_cs_track *track = p->track;
992 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; 1080 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
993 u32 word0, word1, l0_size, mipmap_size; 1081 u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
994 1082
995 /* on legacy kernel we don't perform advanced check */ 1083 /* on legacy kernel we don't perform advanced check */
996 if (p->rdev == NULL) 1084 if (p->rdev == NULL)
997 return 0; 1085 return 0;
1086
998 word0 = radeon_get_ib_value(p, idx + 0); 1087 word0 = radeon_get_ib_value(p, idx + 0);
1088 if (tiling_flags & RADEON_TILING_MACRO)
1089 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1090 else if (tiling_flags & RADEON_TILING_MICRO)
1091 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
999 word1 = radeon_get_ib_value(p, idx + 1); 1092 word1 = radeon_get_ib_value(p, idx + 1);
1000 w0 = G_038000_TEX_WIDTH(word0) + 1; 1093 w0 = G_038000_TEX_WIDTH(word0) + 1;
1001 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1094 h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1022,11 +1115,55 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1022 __func__, __LINE__, G_038004_DATA_FORMAT(word1)); 1115 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
1023 return -EINVAL; 1116 return -EINVAL;
1024 } 1117 }
1118
1119 pitch = G_038000_PITCH(word0) + 1;
1120 switch (G_038000_TILE_MODE(word0)) {
1121 case V_038000_ARRAY_LINEAR_GENERAL:
1122 pitch_align = 1;
1123 /* XXX check height align */
1124 break;
1125 case V_038000_ARRAY_LINEAR_ALIGNED:
1126 pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
1127 if (!IS_ALIGNED(pitch, pitch_align)) {
1128 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1129 __func__, __LINE__, pitch);
1130 return -EINVAL;
1131 }
1132 /* XXX check height align */
1133 break;
1134 case V_038000_ARRAY_1D_TILED_THIN1:
1135 pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
1136 if (!IS_ALIGNED(pitch, pitch_align)) {
1137 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1138 __func__, __LINE__, pitch);
1139 return -EINVAL;
1140 }
1141 /* XXX check height align */
1142 break;
1143 case V_038000_ARRAY_2D_TILED_THIN1:
1144 pitch_align = max((u32)track->nbanks,
1145 (u32)(((track->group_size / 8) / bpe) * track->nbanks));
1146 if (!IS_ALIGNED(pitch, pitch_align)) {
1147 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1148 __func__, __LINE__, pitch);
1149 return -EINVAL;
1150 }
1151 /* XXX check height align */
1152 break;
1153 default:
1154 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
1155 G_038000_TILE_MODE(word0), word0);
1156 return -EINVAL;
1157 }
1158 /* XXX check offset align */
1159
1025 word0 = radeon_get_ib_value(p, idx + 4); 1160 word0 = radeon_get_ib_value(p, idx + 4);
1026 word1 = radeon_get_ib_value(p, idx + 5); 1161 word1 = radeon_get_ib_value(p, idx + 5);
1027 blevel = G_038010_BASE_LEVEL(word0); 1162 blevel = G_038010_BASE_LEVEL(word0);
1028 nlevels = G_038014_LAST_LEVEL(word1); 1163 nlevels = G_038014_LAST_LEVEL(word1);
1029 r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size); 1164 r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
1165 (pitch_align * bpe),
1166 &l0_size, &mipmap_size);
1030 /* using get ib will give us the offset into the texture bo */ 1167 /* using get ib will give us the offset into the texture bo */
1031 word0 = radeon_get_ib_value(p, idx + 2); 1168 word0 = radeon_get_ib_value(p, idx + 2);
1032 if ((l0_size + word0) > radeon_bo_size(texture)) { 1169 if ((l0_size + word0) > radeon_bo_size(texture)) {
@@ -1240,6 +1377,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1240 return -EINVAL; 1377 return -EINVAL;
1241 } 1378 }
1242 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1379 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1380 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1381 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1382 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1383 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1243 texture = reloc->robj; 1384 texture = reloc->robj;
1244 /* tex mip base */ 1385 /* tex mip base */
1245 r = r600_cs_packet_next_reloc(p, &reloc); 1386 r = r600_cs_packet_next_reloc(p, &reloc);
@@ -1250,7 +1391,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1250 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1391 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1251 mipmap = reloc->robj; 1392 mipmap = reloc->robj;
1252 r = r600_check_texture_resource(p, idx+(i*7)+1, 1393 r = r600_check_texture_resource(p, idx+(i*7)+1,
1253 texture, mipmap); 1394 texture, mipmap, reloc->lobj.tiling_flags);
1254 if (r) 1395 if (r)
1255 return r; 1396 return r;
1256 break; 1397 break;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 26b4bc9d89a5..e6a58ed48dcf 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -435,7 +435,8 @@ static int r600_hdmi_find_free_block(struct drm_device *dev)
435 } 435 }
436 } 436 }
437 437
438 if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) { 438 if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
439 rdev->family == CHIP_RS740) {
439 return free_blocks[0] ? R600_HDMI_BLOCK1 : 0; 440 return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
440 } else if (rdev->family >= CHIP_R600) { 441 } else if (rdev->family >= CHIP_R600) {
441 if (free_blocks[0]) 442 if (free_blocks[0])
@@ -466,7 +467,8 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
466 if (ASIC_IS_DCE32(rdev)) 467 if (ASIC_IS_DCE32(rdev))
467 radeon_encoder->hdmi_config_offset = dig->dig_encoder ? 468 radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
468 R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1; 469 R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
469 } else if (rdev->family >= CHIP_R600) { 470 } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
471 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
470 radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev); 472 radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
471 } 473 }
472} 474}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59c1f8793e60..858a1920c0d7 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -239,12 +239,18 @@
239#define GRBM_SOFT_RESET 0x8020 239#define GRBM_SOFT_RESET 0x8020
240#define SOFT_RESET_CP (1<<0) 240#define SOFT_RESET_CP (1<<0)
241 241
242#define CG_THERMAL_STATUS 0x7F4
243#define ASIC_T(x) ((x) << 0)
244#define ASIC_T_MASK 0x1FF
245#define ASIC_T_SHIFT 0
246
242#define HDP_HOST_PATH_CNTL 0x2C00 247#define HDP_HOST_PATH_CNTL 0x2C00
243#define HDP_NONSURFACE_BASE 0x2C04 248#define HDP_NONSURFACE_BASE 0x2C04
244#define HDP_NONSURFACE_INFO 0x2C08 249#define HDP_NONSURFACE_INFO 0x2C08
245#define HDP_NONSURFACE_SIZE 0x2C0C 250#define HDP_NONSURFACE_SIZE 0x2C0C
246#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 251#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
247#define HDP_TILING_CONFIG 0x2F3C 252#define HDP_TILING_CONFIG 0x2F3C
253#define HDP_DEBUG1 0x2F34
248 254
249#define MC_VM_AGP_TOP 0x2184 255#define MC_VM_AGP_TOP 0x2184
250#define MC_VM_AGP_BOT 0x2188 256#define MC_VM_AGP_BOT 0x2188
@@ -1154,6 +1160,10 @@
1154#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3) 1160#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
1155#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF) 1161#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
1156#define C_038000_TILE_MODE 0xFFFFFF87 1162#define C_038000_TILE_MODE 0xFFFFFF87
1163#define V_038000_ARRAY_LINEAR_GENERAL 0x00000000
1164#define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001
1165#define V_038000_ARRAY_1D_TILED_THIN1 0x00000002
1166#define V_038000_ARRAY_2D_TILED_THIN1 0x00000004
1157#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7) 1167#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
1158#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1) 1168#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
1159#define C_038000_TILE_TYPE 0xFFFFFF7F 1169#define C_038000_TILE_TYPE 0xFFFFFF7F
@@ -1357,6 +1367,8 @@
1357#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15) 1367#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
1358#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF) 1368#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
1359#define C_028010_ARRAY_MODE 0xFFF87FFF 1369#define C_028010_ARRAY_MODE 0xFFF87FFF
1370#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002
1371#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004
1360#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25) 1372#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
1361#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1) 1373#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
1362#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF 1374#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
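
The new r600d.h values slot into the driver's existing S_/G_/C_ field-macro pattern (set, get, clear-mask). A small sketch exercising the ARRAY_MODE field the way the CS checker now does when a relocated depth buffer carries a macro-tiling flag; the macros are copied from the hunk above:

#include <stdio.h>

#define S_028010_ARRAY_MODE(x)        (((x) & 0xF) << 15)
#define G_028010_ARRAY_MODE(x)        (((x) >> 15) & 0xF)
#define C_028010_ARRAY_MODE           0xFFF87FFF
#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002
#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004

int main(void)
{
        unsigned int db_depth_info = 0xdeadbeef;        /* arbitrary starting value */

        db_depth_info &= C_028010_ARRAY_MODE;           /* clear the field */
        db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
        printf("array mode now %u\n", G_028010_ARRAY_MODE(db_depth_info));      /* 4 */
        return 0;
}
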
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ab61aaa887bb..c84f9a311550 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -178,6 +178,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
178void radeon_atombios_get_power_modes(struct radeon_device *rdev); 178void radeon_atombios_get_power_modes(struct radeon_device *rdev);
179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); 179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
180void rs690_pm_info(struct radeon_device *rdev); 180void rs690_pm_info(struct radeon_device *rdev);
181extern u32 rv6xx_get_temp(struct radeon_device *rdev);
182extern u32 rv770_get_temp(struct radeon_device *rdev);
183extern u32 evergreen_get_temp(struct radeon_device *rdev);
181 184
182/* 185/*
183 * Fences. 186 * Fences.
@@ -351,6 +354,7 @@ struct radeon_mc {
351 int vram_mtrr; 354 int vram_mtrr;
352 bool vram_is_ddr; 355 bool vram_is_ddr;
353 bool igp_sideport_enabled; 356 bool igp_sideport_enabled;
357 u64 gtt_base_align;
354}; 358};
355 359
356bool radeon_combios_sideport_present(struct radeon_device *rdev); 360bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -670,6 +674,13 @@ struct radeon_pm_profile {
670 int dpms_on_cm_idx; 674 int dpms_on_cm_idx;
671}; 675};
672 676
677enum radeon_int_thermal_type {
678 THERMAL_TYPE_NONE,
679 THERMAL_TYPE_RV6XX,
680 THERMAL_TYPE_RV770,
681 THERMAL_TYPE_EVERGREEN,
682};
683
673struct radeon_voltage { 684struct radeon_voltage {
674 enum radeon_voltage_type type; 685 enum radeon_voltage_type type;
675 /* gpio voltage */ 686 /* gpio voltage */
@@ -765,6 +776,9 @@ struct radeon_pm {
765 enum radeon_pm_profile_type profile; 776 enum radeon_pm_profile_type profile;
766 int profile_index; 777 int profile_index;
767 struct radeon_pm_profile profiles[PM_PROFILE_MAX]; 778 struct radeon_pm_profile profiles[PM_PROFILE_MAX];
779 /* internal thermal controller on rv6xx+ */
780 enum radeon_int_thermal_type int_thermal_type;
781 struct device *int_hwmon_dev;
768}; 782};
769 783
770 784
@@ -901,6 +915,7 @@ struct r600_asic {
901 unsigned tiling_nbanks; 915 unsigned tiling_nbanks;
902 unsigned tiling_npipes; 916 unsigned tiling_npipes;
903 unsigned tiling_group_size; 917 unsigned tiling_group_size;
918 unsigned tile_config;
904 struct r100_gpu_lockup lockup; 919 struct r100_gpu_lockup lockup;
905}; 920};
906 921
@@ -925,6 +940,7 @@ struct rv770_asic {
925 unsigned tiling_nbanks; 940 unsigned tiling_nbanks;
926 unsigned tiling_npipes; 941 unsigned tiling_npipes;
927 unsigned tiling_group_size; 942 unsigned tiling_group_size;
943 unsigned tile_config;
928 struct r100_gpu_lockup lockup; 944 struct r100_gpu_lockup lockup;
929}; 945};
930 946
@@ -950,6 +966,7 @@ struct evergreen_asic {
950 unsigned tiling_nbanks; 966 unsigned tiling_nbanks;
951 unsigned tiling_npipes; 967 unsigned tiling_npipes;
952 unsigned tiling_group_size; 968 unsigned tiling_group_size;
969 unsigned tile_config;
953}; 970};
954 971
955union radeon_asic_config { 972union radeon_asic_config {
@@ -1032,6 +1049,9 @@ struct radeon_device {
1032 uint32_t pcie_reg_mask; 1049 uint32_t pcie_reg_mask;
1033 radeon_rreg_t pciep_rreg; 1050 radeon_rreg_t pciep_rreg;
1034 radeon_wreg_t pciep_wreg; 1051 radeon_wreg_t pciep_wreg;
1052 /* io port */
1053 void __iomem *rio_mem;
1054 resource_size_t rio_mem_size;
1035 struct radeon_clock clock; 1055 struct radeon_clock clock;
1036 struct radeon_mc mc; 1056 struct radeon_mc mc;
1037 struct radeon_gart gart; 1057 struct radeon_gart gart;
@@ -1068,6 +1088,7 @@ struct radeon_device {
1068 struct mutex vram_mutex; 1088 struct mutex vram_mutex;
1069 1089
1070 /* audio stuff */ 1090 /* audio stuff */
1091 bool audio_enabled;
1071 struct timer_list audio_timer; 1092 struct timer_list audio_timer;
1072 int audio_channels; 1093 int audio_channels;
1073 int audio_rate; 1094 int audio_rate;
@@ -1113,6 +1134,26 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
1113 } 1134 }
1114} 1135}
1115 1136
1137static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
1138{
1139 if (reg < rdev->rio_mem_size)
1140 return ioread32(rdev->rio_mem + reg);
1141 else {
1142 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
1143 return ioread32(rdev->rio_mem + RADEON_MM_DATA);
1144 }
1145}
1146
1147static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1148{
1149 if (reg < rdev->rio_mem_size)
1150 iowrite32(v, rdev->rio_mem + reg);
1151 else {
1152 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
1153 iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
1154 }
1155}
1156
1116/* 1157/*
1117 * Cast helper 1158 * Cast helper
1118 */ 1159 */
@@ -1151,6 +1192,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
1151 WREG32_PLL(reg, tmp_); \ 1192 WREG32_PLL(reg, tmp_); \
1152 } while (0) 1193 } while (0)
1153#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg))) 1194#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
1195#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1196#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1154 1197
1155/* 1198/*
1156 * Indirect registers accessor 1199 * Indirect registers accessor
@@ -1414,6 +1457,13 @@ extern void r700_cp_fini(struct radeon_device *rdev);
1414extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); 1457extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
1415extern int evergreen_irq_set(struct radeon_device *rdev); 1458extern int evergreen_irq_set(struct radeon_device *rdev);
1416 1459
1460/* radeon_acpi.c */
1461#if defined(CONFIG_ACPI)
1462extern int radeon_acpi_init(struct radeon_device *rdev);
1463#else
1464static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1465#endif
1466
1417/* evergreen */ 1467/* evergreen */
1418struct evergreen_mc_save { 1468struct evergreen_mc_save {
1419 u32 vga_control[6]; 1469 u32 vga_control[6];
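
radeon.h now grows per-family get_temp() hooks, an internal thermal-controller type and an hwmon device pointer, alongside the CG_THERMAL_STATUS/ASIC_T field added to r600d.h earlier in the patch. A hedged sketch of what a temperature read could look like on that basis; the millidegree scaling and the fake register value are assumptions for illustration, not taken from this patch:

#include <stdio.h>

#define CG_THERMAL_STATUS 0x7F4
#define ASIC_T_MASK       0x1FF
#define ASIC_T_SHIFT      0

/* stand-in for RREG32() so the sketch compiles on its own */
static unsigned int rreg32_fake(unsigned int reg)
{
        return reg == CG_THERMAL_STATUS ? 0x45 : 0;     /* pretend the ASIC reports 69 */
}

static int read_temp_millicelsius(void)
{
        unsigned int t = (rreg32_fake(CG_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT;

        return (int)t * 1000;   /* hwmon sysfs convention: millidegrees Celsius */
}

int main(void)
{
        printf("%d\n", read_temp_millicelsius());       /* 69000 */
        return 0;
}
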
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
new file mode 100644
index 000000000000..e366434035cb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -0,0 +1,67 @@
1#include <linux/pci.h>
2#include <linux/acpi.h>
3#include <linux/slab.h>
4#include <acpi/acpi_drivers.h>
5#include <acpi/acpi_bus.h>
6
7#include "drmP.h"
8#include "drm.h"
9#include "drm_sarea.h"
10#include "drm_crtc_helper.h"
11#include "radeon.h"
12
13#include <linux/vga_switcheroo.h>
14
15/* Call the ATIF method
16 *
17 * Note: currently we discard the output
18 */
19static int radeon_atif_call(acpi_handle handle)
20{
21 acpi_status status;
22 union acpi_object atif_arg_elements[2];
23 struct acpi_object_list atif_arg;
24 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
25
26 atif_arg.count = 2;
27 atif_arg.pointer = &atif_arg_elements[0];
28
29 atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
30 atif_arg_elements[0].integer.value = 0;
31 atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
32 atif_arg_elements[1].integer.value = 0;
33
34 status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
35
36 /* Fail only if calling the method fails and ATIF is supported */
37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
38 printk(KERN_INFO "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
39 kfree(buffer.pointer);
40 return 1;
41 }
42
43 kfree(buffer.pointer);
44 return 0;
45}
46
47/* Call all ACPI methods here */
48int radeon_acpi_init(struct radeon_device *rdev)
49{
50 acpi_handle handle;
51 int ret;
52
53 /* No need to proceed if we're sure that ATIF is not supported */
54 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
55 return 0;
56
57 /* Get the device handle */
58 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
59
60 /* Call the ATIF method */
61 ret = radeon_atif_call(handle);
62 if (ret)
63 return ret;
64
65 return 0;
66}
67
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c0bbaa64157a..a5aff755f0d2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -113,6 +113,7 @@ void r100_wb_fini(struct radeon_device *rdev);
113int r100_wb_init(struct radeon_device *rdev); 113int r100_wb_init(struct radeon_device *rdev);
114int r100_cp_reset(struct radeon_device *rdev); 114int r100_cp_reset(struct radeon_device *rdev);
115void r100_vga_render_disable(struct radeon_device *rdev); 115void r100_vga_render_disable(struct radeon_device *rdev);
116void r100_restore_sanity(struct radeon_device *rdev);
116int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 117int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
117 struct radeon_cs_packet *pkt, 118 struct radeon_cs_packet *pkt,
118 struct radeon_bo *robj); 119 struct radeon_bo *robj);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 99bd8a9c56b3..0a97aeb083dd 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -280,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
280 } 280 }
281 } 281 }
282 282
283 /* ASUS HD 3600 board lists the DVI port as HDMI */
284 if ((dev->pdev->device == 0x9598) &&
285 (dev->pdev->subsystem_vendor == 0x1043) &&
286 (dev->pdev->subsystem_device == 0x01e4)) {
287 if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
288 *connector_type = DRM_MODE_CONNECTOR_DVII;
289 }
290 }
291
283 /* ASUS HD 3450 board lists the DVI port as HDMI */ 292 /* ASUS HD 3450 board lists the DVI port as HDMI */
284 if ((dev->pdev->device == 0x95C5) && 293 if ((dev->pdev->device == 0x95C5) &&
285 (dev->pdev->subsystem_vendor == 0x1043) && 294 (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -1029,8 +1038,15 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1029 data_offset); 1038 data_offset);
1030 switch (crev) { 1039 switch (crev) {
1031 case 1: 1040 case 1:
1032 if (igp_info->info.ucMemoryType & 0xf0) 1041 /* AMD IGPS */
1033 return true; 1042 if ((rdev->family == CHIP_RS690) ||
1043 (rdev->family == CHIP_RS740)) {
1044 if (igp_info->info.ulBootUpMemoryClock)
1045 return true;
1046 } else {
1047 if (igp_info->info.ucMemoryType & 0xf0)
1048 return true;
1049 }
1034 break; 1050 break;
1035 case 2: 1051 case 2:
1036 if (igp_info->info_2.ucMemoryType & 0x0f) 1052 if (igp_info->info_2.ucMemoryType & 0x0f)
@@ -1773,14 +1789,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1773 } 1789 }
1774 1790
1775 /* add the i2c bus for thermal/fan chip */ 1791 /* add the i2c bus for thermal/fan chip */
1776 /* no support for internal controller yet */
1777 if (controller->ucType > 0) { 1792 if (controller->ucType > 0) {
1778 if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || 1793 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
1779 (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || 1794 DRM_INFO("Internal thermal controller %s fan control\n",
1780 (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { 1795 (controller->ucFanParameters &
1796 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1797 rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
1798 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
1799 DRM_INFO("Internal thermal controller %s fan control\n",
1800 (controller->ucFanParameters &
1801 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1802 rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
1803 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
1781 DRM_INFO("Internal thermal controller %s fan control\n", 1804 DRM_INFO("Internal thermal controller %s fan control\n",
1782 (controller->ucFanParameters & 1805 (controller->ucFanParameters &
1783 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 1806 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1807 rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
1784 } else if ((controller->ucType == 1808 } else if ((controller->ucType ==
1785 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || 1809 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
1786 (controller->ucType == 1810 (controller->ucType ==
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index d1c1d8dd93ce..5e45cb27eb98 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2941,9 +2941,8 @@ static void combios_write_ram_size(struct drm_device *dev)
2941 if (rev < 3) { 2941 if (rev < 3) {
2942 mem_cntl = RBIOS32(offset + 1); 2942 mem_cntl = RBIOS32(offset + 1);
2943 mem_size = RBIOS16(offset + 5); 2943 mem_size = RBIOS16(offset + 5);
2944 if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) && 2944 if ((rdev->family < CHIP_R200) &&
2945 ((dev->pdev->device != 0x515e) 2945 !ASIC_IS_RN50(rdev))
2946 && (dev->pdev->device != 0x5969)))
2947 WREG32(RADEON_MEM_CNTL, mem_cntl); 2946 WREG32(RADEON_MEM_CNTL, mem_cntl);
2948 } 2947 }
2949 } 2948 }
@@ -2954,10 +2953,8 @@ static void combios_write_ram_size(struct drm_device *dev)
2954 if (offset) { 2953 if (offset) {
2955 rev = RBIOS8(offset - 1); 2954 rev = RBIOS8(offset - 1);
2956 if (rev < 1) { 2955 if (rev < 1) {
2957 if (((rdev->flags & RADEON_FAMILY_MASK) < 2956 if ((rdev->family < CHIP_R200)
2958 CHIP_R200) 2957 && !ASIC_IS_RN50(rdev)) {
2959 && ((dev->pdev->device != 0x515e)
2960 && (dev->pdev->device != 0x5969))) {
2961 int ram = 0; 2958 int ram = 0;
2962 int mem_addr_mapping = 0; 2959 int mem_addr_mapping = 0;
2963 2960
@@ -3050,6 +3047,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
3050 rdev->pdev->subsystem_device == 0x308b) 3047 rdev->pdev->subsystem_device == 0x308b)
3051 return; 3048 return;
3052 3049
3050 /* quirk for rs4xx HP dv5000 laptop to make it resume
3051 * - it hangs on resume inside the dynclk 1 table.
3052 */
3053 if (rdev->family == CHIP_RS480 &&
3054 rdev->pdev->subsystem_vendor == 0x103c &&
3055 rdev->pdev->subsystem_device == 0x30a4)
3056 return;
3057
3053 /* DYN CLK 1 */ 3058 /* DYN CLK 1 */
3054 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3059 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3055 if (table) 3060 if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f58f8bd8f77b..adccbc2c202c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -771,14 +771,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
771 } else 771 } else
772 ret = connector_status_connected; 772 ret = connector_status_connected;
773 773
774 /* multiple connectors on the same encoder with the same ddc line 774 /* This gets complicated. We have boards with VGA + HDMI with a
775 * This tends to be HDMI and DVI on the same encoder with the 775 * shared DDC line and we have boards with DVI-D + HDMI with a shared
776 * same ddc line. If the edid says HDMI, consider the HDMI port 776 * DDC line. The latter is more complex because with DVI<->HDMI adapters
777 * connected and the DVI port disconnected. If the edid doesn't 777 * you don't really know what's connected to which port as both are digital.
778 * say HDMI, vice versa.
779 */ 778 */
780 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { 779 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
781 struct drm_device *dev = connector->dev; 780 struct drm_device *dev = connector->dev;
781 struct radeon_device *rdev = dev->dev_private;
782 struct drm_connector *list_connector; 782 struct drm_connector *list_connector;
783 struct radeon_connector *list_radeon_connector; 783 struct radeon_connector *list_radeon_connector;
784 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { 784 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
@@ -788,15 +788,10 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
788 if (list_radeon_connector->shared_ddc && 788 if (list_radeon_connector->shared_ddc &&
789 (list_radeon_connector->ddc_bus->rec.i2c_id == 789 (list_radeon_connector->ddc_bus->rec.i2c_id ==
790 radeon_connector->ddc_bus->rec.i2c_id)) { 790 radeon_connector->ddc_bus->rec.i2c_id)) {
791 if (drm_detect_hdmi_monitor(radeon_connector->edid)) { 791 /* cases where both connectors are digital */
792 if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { 792 if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
793 kfree(radeon_connector->edid); 793 /* hpd is our only option in this case */
794 radeon_connector->edid = NULL; 794 if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
795 ret = connector_status_disconnected;
796 }
797 } else {
798 if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
799 (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
800 kfree(radeon_connector->edid); 795 kfree(radeon_connector->edid);
801 radeon_connector->edid = NULL; 796 radeon_connector->edid = NULL;
802 ret = connector_status_disconnected; 797 ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 37533bec1f25..0fea894fc127 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -226,20 +226,20 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
226{ 226{
227 u64 size_af, size_bf; 227 u64 size_af, size_bf;
228 228
229 size_af = 0xFFFFFFFF - mc->vram_end; 229 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
230 size_bf = mc->vram_start; 230 size_bf = mc->vram_start & ~mc->gtt_base_align;
231 if (size_bf > size_af) { 231 if (size_bf > size_af) {
232 if (mc->gtt_size > size_bf) { 232 if (mc->gtt_size > size_bf) {
233 dev_warn(rdev->dev, "limiting GTT\n"); 233 dev_warn(rdev->dev, "limiting GTT\n");
234 mc->gtt_size = size_bf; 234 mc->gtt_size = size_bf;
235 } 235 }
236 mc->gtt_start = mc->vram_start - mc->gtt_size; 236 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
237 } else { 237 } else {
238 if (mc->gtt_size > size_af) { 238 if (mc->gtt_size > size_af) {
239 dev_warn(rdev->dev, "limiting GTT\n"); 239 dev_warn(rdev->dev, "limiting GTT\n");
240 mc->gtt_size = size_af; 240 mc->gtt_size = size_af;
241 } 241 }
242 mc->gtt_start = mc->vram_end + 1; 242 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
243 } 243 }
244 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; 244 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
245 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", 245 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
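
radeon_gtt_location() now rounds the GTT start (and the space available after VRAM) to mc->gtt_base_align, which the hunk uses as a mask, i.e. it holds alignment-minus-one and 0 preserves the old placement. A small sketch of the rounding with made-up addresses:

#include <stdio.h>
#include <stdint.h>

static uint64_t gtt_start_after_vram(uint64_t vram_end, uint64_t gtt_base_align)
{
        /* same expression as the hunk: round up past the end of VRAM */
        return (vram_end + 1 + gtt_base_align) & ~gtt_base_align;
}

int main(void)
{
        /* VRAM ends at 256MB - 1; align the GTT base to 512MB (mask 0x1FFFFFFF) */
        printf("0x%llx\n",
               (unsigned long long)gtt_start_after_vram(0x0FFFFFFFULL, 0x1FFFFFFFULL));
        return 0;       /* prints 0x20000000 */
}
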
@@ -415,6 +415,22 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
415 return r; 415 return r;
416} 416}
417 417
418static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
419{
420 struct radeon_device *rdev = info->dev->dev_private;
421
422 WREG32_IO(reg*4, val);
423}
424
425static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
426{
427 struct radeon_device *rdev = info->dev->dev_private;
428 uint32_t r;
429
430 r = RREG32_IO(reg*4);
431 return r;
432}
433
418int radeon_atombios_init(struct radeon_device *rdev) 434int radeon_atombios_init(struct radeon_device *rdev)
419{ 435{
420 struct card_info *atom_card_info = 436 struct card_info *atom_card_info =
@@ -427,6 +443,15 @@ int radeon_atombios_init(struct radeon_device *rdev)
427 atom_card_info->dev = rdev->ddev; 443 atom_card_info->dev = rdev->ddev;
428 atom_card_info->reg_read = cail_reg_read; 444 atom_card_info->reg_read = cail_reg_read;
429 atom_card_info->reg_write = cail_reg_write; 445 atom_card_info->reg_write = cail_reg_write;
446 /* needed for iio ops */
447 if (rdev->rio_mem) {
448 atom_card_info->ioreg_read = cail_ioreg_read;
449 atom_card_info->ioreg_write = cail_ioreg_write;
450 } else {
451 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
452 atom_card_info->ioreg_read = cail_reg_read;
453 atom_card_info->ioreg_write = cail_reg_write;
454 }
430 atom_card_info->mc_read = cail_mc_read; 455 atom_card_info->mc_read = cail_mc_read;
431 atom_card_info->mc_write = cail_mc_write; 456 atom_card_info->mc_write = cail_mc_write;
432 atom_card_info->pll_read = cail_pll_read; 457 atom_card_info->pll_read = cail_pll_read;
@@ -573,7 +598,7 @@ int radeon_device_init(struct radeon_device *rdev,
573 struct pci_dev *pdev, 598 struct pci_dev *pdev,
574 uint32_t flags) 599 uint32_t flags)
575{ 600{
576 int r; 601 int r, i;
577 int dma_bits; 602 int dma_bits;
578 603
579 rdev->shutdown = false; 604 rdev->shutdown = false;
@@ -659,6 +684,17 @@ int radeon_device_init(struct radeon_device *rdev,
659 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 684 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
660 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 685 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
661 686
687 /* io port mapping */
688 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
689 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
690 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
691 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
692 break;
693 }
694 }
695 if (rdev->rio_mem == NULL)
696 DRM_ERROR("Unable to find PCI I/O BAR\n");
697
662 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 698 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
663 /* this will fail for cards that aren't VGA class devices, just 699 /* this will fail for cards that aren't VGA class devices, just
664 * ignore it */ 700 * ignore it */
@@ -701,6 +737,8 @@ void radeon_device_fini(struct radeon_device *rdev)
701 destroy_workqueue(rdev->wq); 737 destroy_workqueue(rdev->wq);
702 vga_switcheroo_unregister_client(rdev->pdev); 738 vga_switcheroo_unregister_client(rdev->pdev);
703 vga_client_register(rdev->pdev, NULL, NULL, NULL); 739 vga_client_register(rdev->pdev, NULL, NULL, NULL);
740 pci_iounmap(rdev->pdev, rdev->rio_mem);
741 rdev->rio_mem = NULL;
704 iounmap(rdev->rmmio); 742 iounmap(rdev->rmmio);
705 rdev->rmmio = NULL; 743 rdev->rmmio = NULL;
706} 744}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8154cdf796e4..a68728dbd41d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -558,15 +558,17 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
558 current_freq = radeon_div(tmp, ref_div * post_div); 558 current_freq = radeon_div(tmp, ref_div * post_div);
559 559
560 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 560 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
561 error = freq - current_freq; 561 if (freq < current_freq)
562 error = error < 0 ? 0xffffffff : error; 562 error = 0xffffffff;
563 else
564 error = freq - current_freq;
563 } else 565 } else
564 error = abs(current_freq - freq); 566 error = abs(current_freq - freq);
565 vco_diff = abs(vco - best_vco); 567 vco_diff = abs(vco - best_vco);
566 568
567 if ((best_vco == 0 && error < best_error) || 569 if ((best_vco == 0 && error < best_error) ||
568 (best_vco != 0 && 570 (best_vco != 0 &&
569 (error < best_error - 100 || 571 ((best_error > 100 && error < best_error - 100) ||
570 (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { 572 (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
571 best_post_div = post_div; 573 best_post_div = post_div;
572 best_ref_div = ref_div; 574 best_ref_div = ref_div;
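
Both halves of this radeon_compute_pll_legacy() change remove reliance on unsigned wrap-around: the PREFER_CLOSEST_LOWER path now tests freq < current_freq explicitly instead of checking an unsigned error for being negative, and the best-match test only subtracts 100 from best_error once best_error is known to exceed 100, since otherwise the subtraction underflows and lets a much worse divider pair displace an already good one. A standalone demonstration of that second case (values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t best_error = 40;   /* already within 40 units of the target */
    uint32_t error = 5000;      /* clearly worse candidate */

    /* Old test: best_error - 100 wraps to ~4.29e9, so 5000 "improves" on it. */
    printf("old: %s\n", (error < best_error - 100) ? "accepted" : "rejected");

    /* Patched test: only subtract once it cannot underflow. */
    printf("new: %s\n", (best_error > 100 && error < best_error - 100)
                        ? "accepted" : "rejected");
    return 0;
}
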
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ed0ceb3fc40a..6f8a2e572878 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -46,9 +46,10 @@
46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs 46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
47 * - 2.4.0 - add crtc id query 47 * - 2.4.0 - add crtc id query
48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen 48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
49 * - 2.6.0 - add tiling config query (r6xx+)
49 */ 50 */
50#define KMS_DRIVER_MAJOR 2 51#define KMS_DRIVER_MAJOR 2
51#define KMS_DRIVER_MINOR 5 52#define KMS_DRIVER_MINOR 6
52#define KMS_DRIVER_PATCHLEVEL 0 53#define KMS_DRIVER_PATCHLEVEL 0
53int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 54int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
54int radeon_driver_unload_kms(struct drm_device *dev); 55int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a70c0dc7f92..8931c8e78101 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -49,7 +49,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) 49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
50{ 50{
51 struct radeon_device *rdev; 51 struct radeon_device *rdev;
52 int r; 52 int r, acpi_status;
53 53
54 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); 54 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
55 if (rdev == NULL) { 55 if (rdev == NULL) {
@@ -77,6 +77,12 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
77 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); 77 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
78 goto out; 78 goto out;
79 } 79 }
80
81 /* Call ACPI methods */
82 acpi_status = radeon_acpi_init(rdev);
83 if (acpi_status)
84 dev_err(&dev->pdev->dev, "Error during ACPI methods call\n");
85
80 /* Again modeset_init should fail only on fatal error 86 /* Again modeset_init should fail only on fatal error
81 * otherwise it should provide enough functionalities 87 * otherwise it should provide enough functionalities
82 * for shadowfb to run 88 * for shadowfb to run
@@ -128,7 +134,8 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
128 for (i = 0, found = 0; i < rdev->num_crtc; i++) { 134 for (i = 0, found = 0; i < rdev->num_crtc; i++) {
129 crtc = (struct drm_crtc *)minfo->crtcs[i]; 135 crtc = (struct drm_crtc *)minfo->crtcs[i];
130 if (crtc && crtc->base.id == value) { 136 if (crtc && crtc->base.id == value) {
131 value = i; 137 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
138 value = radeon_crtc->crtc_id;
132 found = 1; 139 found = 1;
133 break; 140 break;
134 } 141 }
@@ -141,6 +148,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
141 case RADEON_INFO_ACCEL_WORKING2: 148 case RADEON_INFO_ACCEL_WORKING2:
142 value = rdev->accel_working; 149 value = rdev->accel_working;
143 break; 150 break;
151 case RADEON_INFO_TILING_CONFIG:
152 if (rdev->family >= CHIP_CEDAR)
153 value = rdev->config.evergreen.tile_config;
154 else if (rdev->family >= CHIP_RV770)
155 value = rdev->config.rv770.tile_config;
156 else if (rdev->family >= CHIP_R600)
157 value = rdev->config.r600.tile_config;
158 else {
159 DRM_DEBUG("tiling config is r6xx+ only!\n");
160 return -EINVAL;
161 }
162 break;
144 default: 163 default:
145 DRM_DEBUG("Invalid request %d\n", info->request); 164 DRM_DEBUG("Invalid request %d\n", info->request);
146 return -EINVAL; 165 return -EINVAL;
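
The new RADEON_INFO_TILING_CONFIG request picks the per-family config field by testing the family enum from newest to oldest, so each chip lands in the first bucket it qualifies for; anything older than R600 has no tiling config and the ioctl returns -EINVAL. A compact sketch of the same cascade with a cut-down, hypothetical enum (only the monotonic ordering matters, not the values):

#include <stdio.h>

/* Hypothetical subset; in the driver the enum grows monotonically with
 * chip generation, which is what the >= comparisons rely on. */
enum chip_family { CHIP_R300, CHIP_R600, CHIP_RV770, CHIP_CEDAR };

static const char *tiling_bucket(enum chip_family family)
{
    if (family >= CHIP_CEDAR)       /* evergreen and newer */
        return "config.evergreen.tile_config";
    else if (family >= CHIP_RV770)  /* r7xx */
        return "config.rv770.tile_config";
    else if (family >= CHIP_R600)   /* r6xx */
        return "config.r600.tile_config";
    return NULL;                    /* pre-r600: -EINVAL from the ioctl */
}

int main(void)
{
    const char *b = tiling_bucket(CHIP_R300);

    printf("RV770 -> %s\n", tiling_bucket(CHIP_RV770));
    printf("R300  -> %s\n", b ? b : "unsupported");
    return 0;
}
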
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index bad77f40a9da..5688a0cf6bbe 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
108 udelay(panel_pwr_delay * 1000); 108 udelay(panel_pwr_delay * 1000);
109 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 109 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
110 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); 110 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
111 udelay(panel_pwr_delay * 1000);
111 break; 112 break;
112 } 113 }
113 114
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index f2ed27c8055b..032040397743 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -642,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
642 } 642 }
643 flicker_removal = (tmp + 500) / 1000; 643 flicker_removal = (tmp + 500) / 1000;
644 644
645 if (flicker_removal < 2) 645 if (flicker_removal < 3)
646 flicker_removal = 2; 646 flicker_removal = 3;
647 for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { 647 for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
648 if (flicker_removal == SLOPE_limit[i]) 648 if (flicker_removal == SLOPE_limit[i])
649 break; 649 break;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d5b9373ce06c..0afd1e62347d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
110 bo->surface_reg = -1; 110 bo->surface_reg = -1;
111 INIT_LIST_HEAD(&bo->list); 111 INIT_LIST_HEAD(&bo->list);
112 112
113retry:
113 radeon_ttm_placement_from_domain(bo, domain); 114 radeon_ttm_placement_from_domain(bo, domain);
114 /* Kernel allocation are uninterruptible */ 115 /* Kernel allocation are uninterruptible */
115 mutex_lock(&rdev->vram_mutex); 116 mutex_lock(&rdev->vram_mutex);
@@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
118 &radeon_ttm_bo_destroy); 119 &radeon_ttm_bo_destroy);
119 mutex_unlock(&rdev->vram_mutex); 120 mutex_unlock(&rdev->vram_mutex);
120 if (unlikely(r != 0)) { 121 if (unlikely(r != 0)) {
121 if (r != -ERESTARTSYS) 122 if (r != -ERESTARTSYS) {
123 if (domain == RADEON_GEM_DOMAIN_VRAM) {
124 domain |= RADEON_GEM_DOMAIN_GTT;
125 goto retry;
126 }
122 dev_err(rdev->dev, 127 dev_err(rdev->dev,
123 "object_init failed for (%lu, 0x%08X)\n", 128 "object_init failed for (%lu, 0x%08X)\n",
124 size, domain); 129 size, domain);
130 }
125 return r; 131 return r;
126 } 132 }
127 *bo_ptr = bo; 133 *bo_ptr = bo;
@@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head)
321{ 327{
322 struct radeon_bo_list *lobj; 328 struct radeon_bo_list *lobj;
323 struct radeon_bo *bo; 329 struct radeon_bo *bo;
330 u32 domain;
324 int r; 331 int r;
325 332
326 list_for_each_entry(lobj, head, list) { 333 list_for_each_entry(lobj, head, list) {
@@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head)
333 list_for_each_entry(lobj, head, list) { 340 list_for_each_entry(lobj, head, list) {
334 bo = lobj->bo; 341 bo = lobj->bo;
335 if (!bo->pin_count) { 342 if (!bo->pin_count) {
336 if (lobj->wdomain) { 343 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
337 radeon_ttm_placement_from_domain(bo, 344
338 lobj->wdomain); 345 retry:
339 } else { 346 radeon_ttm_placement_from_domain(bo, domain);
340 radeon_ttm_placement_from_domain(bo,
341 lobj->rdomain);
342 }
343 r = ttm_bo_validate(&bo->tbo, &bo->placement, 347 r = ttm_bo_validate(&bo->tbo, &bo->placement,
344 true, false, false); 348 true, false, false);
345 if (unlikely(r)) 349 if (unlikely(r)) {
350 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
351 domain |= RADEON_GEM_DOMAIN_GTT;
352 goto retry;
353 }
346 return r; 354 return r;
355 }
347 } 356 }
348 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 357 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
349 lobj->tiling_flags = bo->tiling_flags; 358 lobj->tiling_flags = bo->tiling_flags;
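
Both radeon_bo_create() and radeon_bo_list_validate() now degrade gracefully when VRAM placement fails: unless the error is -ERESTARTSYS, the requested domain is widened from VRAM to VRAM|GTT and the operation is retried instead of failing the whole allocation or CS validation. The same pattern in isolation, with invented domain flags and a stub standing in for ttm_bo_init()/ttm_bo_validate():

#include <stdint.h>
#include <stdio.h>
#include <errno.h>              /* ENOMEM */

#define ERESTARTSYS 512         /* kernel-internal errno, stubbed for the sketch */

#define DOMAIN_VRAM (1u << 0)
#define DOMAIN_GTT  (1u << 1)

/* Stub placement: pretend a VRAM-only request always fails with -ENOMEM. */
static int try_place(uint32_t domain)
{
    return (domain == DOMAIN_VRAM) ? -ENOMEM : 0;
}

static int place_with_fallback(uint32_t domain)
{
    int r;

retry:
    r = try_place(domain);
    if (r != 0 && r != -ERESTARTSYS && domain == DOMAIN_VRAM) {
        domain |= DOMAIN_GTT;   /* widen the placement and try again */
        goto retry;
    }
    return r;
}

int main(void)
{
    printf("VRAM only, with GTT fallback: %d\n",
           place_with_fallback(DOMAIN_VRAM));
    return 0;
}
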
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 115d26b762cc..ed66062ae9d0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,6 +27,8 @@
27#include <linux/acpi.h> 27#include <linux/acpi.h>
28#endif 28#endif
29#include <linux/power_supply.h> 29#include <linux/power_supply.h>
30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h>
30 32
31#define RADEON_IDLE_LOOP_MS 100 33#define RADEON_IDLE_LOOP_MS 100
32#define RADEON_RECLOCK_DELAY_MS 200 34#define RADEON_RECLOCK_DELAY_MS 200
@@ -423,6 +425,82 @@ fail:
423static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 425static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
424static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 426static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
425 427
428static ssize_t radeon_hwmon_show_temp(struct device *dev,
429 struct device_attribute *attr,
430 char *buf)
431{
432 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
433 struct radeon_device *rdev = ddev->dev_private;
434 u32 temp;
435
436 switch (rdev->pm.int_thermal_type) {
437 case THERMAL_TYPE_RV6XX:
438 temp = rv6xx_get_temp(rdev);
439 break;
440 case THERMAL_TYPE_RV770:
441 temp = rv770_get_temp(rdev);
442 break;
443 case THERMAL_TYPE_EVERGREEN:
444 temp = evergreen_get_temp(rdev);
445 break;
446 default:
447 temp = 0;
448 break;
449 }
450
451 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
452}
453
454static ssize_t radeon_hwmon_show_name(struct device *dev,
455 struct device_attribute *attr,
456 char *buf)
457{
458 return sprintf(buf, "radeon\n");
459}
460
461static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
462static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
463
464static struct attribute *hwmon_attributes[] = {
465 &sensor_dev_attr_temp1_input.dev_attr.attr,
466 &sensor_dev_attr_name.dev_attr.attr,
467 NULL
468};
469
470static const struct attribute_group hwmon_attrgroup = {
471 .attrs = hwmon_attributes,
472};
473
474static void radeon_hwmon_init(struct radeon_device *rdev)
475{
476 int err;
477
478 rdev->pm.int_hwmon_dev = NULL;
479
480 switch (rdev->pm.int_thermal_type) {
481 case THERMAL_TYPE_RV6XX:
482 case THERMAL_TYPE_RV770:
483 case THERMAL_TYPE_EVERGREEN:
484 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
485 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
486 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
487 &hwmon_attrgroup);
488 if (err)
489 DRM_ERROR("Unable to create hwmon sysfs file: %d\n", err);
490 break;
491 default:
492 break;
493 }
494}
495
496static void radeon_hwmon_fini(struct radeon_device *rdev)
497{
498 if (rdev->pm.int_hwmon_dev) {
499 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
500 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
501 }
502}
503
426void radeon_pm_suspend(struct radeon_device *rdev) 504void radeon_pm_suspend(struct radeon_device *rdev)
427{ 505{
428 bool flush_wq = false; 506 bool flush_wq = false;
@@ -470,6 +548,7 @@ int radeon_pm_init(struct radeon_device *rdev)
470 rdev->pm.dynpm_can_downclock = true; 548 rdev->pm.dynpm_can_downclock = true;
471 rdev->pm.current_sclk = rdev->clock.default_sclk; 549 rdev->pm.current_sclk = rdev->clock.default_sclk;
472 rdev->pm.current_mclk = rdev->clock.default_mclk; 550 rdev->pm.current_mclk = rdev->clock.default_mclk;
551 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
473 552
474 if (rdev->bios) { 553 if (rdev->bios) {
475 if (rdev->is_atom_bios) 554 if (rdev->is_atom_bios)
@@ -480,6 +559,8 @@ int radeon_pm_init(struct radeon_device *rdev)
480 radeon_pm_init_profile(rdev); 559 radeon_pm_init_profile(rdev);
481 } 560 }
482 561
562 /* set up the internal thermal sensor if applicable */
563 radeon_hwmon_init(rdev);
483 if (rdev->pm.num_power_states > 1) { 564 if (rdev->pm.num_power_states > 1) {
484 /* where's the best place to put these? */ 565 /* where's the best place to put these? */
485 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 566 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -535,6 +616,7 @@ void radeon_pm_fini(struct radeon_device *rdev)
535#endif 616#endif
536 } 617 }
537 618
619 radeon_hwmon_fini(rdev);
538 if (rdev->pm.i2c_bus) 620 if (rdev->pm.i2c_bus)
539 radeon_i2c_destroy(rdev->pm.i2c_bus); 621 radeon_i2c_destroy(rdev->pm.i2c_bus);
540} 622}
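
radeon_hwmon_init() registers a hwmon device for chips with an internal thermal sensor and exposes it as the standard temp1_input attribute; by hwmon convention that value is in millidegrees Celsius, which is why the per-family helpers (see rv770_get_temp() added later in this series) scale their decoded reading by 1000. A trivial illustration of the dispatch plus scaling, standalone C with invented readings:

#include <stdio.h>

enum thermal_type { THERMAL_NONE, THERMAL_RV6XX, THERMAL_RV770, THERMAL_EVERGREEN };

/* Fake per-family decoders, each already returning millidegrees Celsius. */
static unsigned int fake_rv6xx_temp(void)     { return 51000; }
static unsigned int fake_rv770_temp(void)     { return 57000; }
static unsigned int fake_evergreen_temp(void) { return 49000; }

static unsigned int read_temp(enum thermal_type t)
{
    switch (t) {
    case THERMAL_RV6XX:     return fake_rv6xx_temp();
    case THERMAL_RV770:     return fake_rv770_temp();
    case THERMAL_EVERGREEN: return fake_evergreen_temp();
    default:                return 0;  /* no internal sensor wired up */
    }
}

int main(void)
{
    /* hwmon's temp1_input is millidegrees; user tools divide by 1000. */
    printf("temp1_input = %u\n", read_temp(THERMAL_RV770));
    return 0;
}
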
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9e4240b3bf0b..ae2b76b9a388 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -55,12 +55,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
55 rdev->mc.gtt_size = 32 * 1024 * 1024; 55 rdev->mc.gtt_size = 32 * 1024 * 1024;
56 return; 56 return;
57 } 57 }
58 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
59 /* FIXME: RS400 & RS480 seems to have issue with GART size
60 * if 4G of system memory (needs more testing) */
61 rdev->mc.gtt_size = 32 * 1024 * 1024;
62 DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
63 }
64} 58}
65 59
66void rs400_gart_tlb_flush(struct radeon_device *rdev) 60void rs400_gart_tlb_flush(struct radeon_device *rdev)
@@ -263,6 +257,7 @@ void rs400_mc_init(struct radeon_device *rdev)
263 r100_vram_init_sizes(rdev); 257 r100_vram_init_sizes(rdev);
264 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 258 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
265 radeon_vram_location(rdev, &rdev->mc, base); 259 radeon_vram_location(rdev, &rdev->mc, base);
260 rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
266 radeon_gtt_location(rdev, &rdev->mc); 261 radeon_gtt_location(rdev, &rdev->mc);
267 radeon_update_bandwidth_info(rdev); 262 radeon_update_bandwidth_info(rdev);
268} 263}
@@ -480,6 +475,8 @@ int rs400_init(struct radeon_device *rdev)
480 /* Initialize surface registers */ 475 /* Initialize surface registers */
481 radeon_surface_init(rdev); 476 radeon_surface_init(rdev);
482 /* TODO: disable VGA need to use VGA request */ 477 /* TODO: disable VGA need to use VGA request */
478 /* restore some register to sane defaults */
479 r100_restore_sanity(rdev);
483 /* BIOS*/ 480 /* BIOS*/
484 if (!radeon_get_bios(rdev)) { 481 if (!radeon_get_bios(rdev)) {
485 if (ASIC_IS_AVIVO(rdev)) 482 if (ASIC_IS_AVIVO(rdev))
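
The new mc.gtt_base_align field records what alignment the GTT aperture base must honour: RS400/RS480 (and RS690 below) appear to need the base aligned to the GTT size itself, hence gtt_size - 1 as the alignment mask, while the other parts set 0. Assuming the placement helper rounds the base up with the usual mask arithmetic (radeon_gtt_location() is not part of this diff), the effect looks like this:

#include <stdint.h>
#include <stdio.h>

/* Round base up to (align + 1), where align is a power-of-two size minus 1. */
static uint64_t align_up(uint64_t base, uint64_t align)
{
    return (base + align) & ~align;
}

int main(void)
{
    uint64_t gtt_size = 32ull * 1024 * 1024;   /* 32 MiB aperture */
    uint64_t vram_end = 0x0d000000;            /* hypothetical end of VRAM */

    printf("unaligned GTT base: 0x%llx\n", (unsigned long long)vram_end);
    printf("aligned   GTT base: 0x%llx\n",
           (unsigned long long)align_up(vram_end, gtt_size - 1));
    return 0;
}
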
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5ce3ccc7a423..85cd911952c1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -698,6 +698,7 @@ void rs600_mc_init(struct radeon_device *rdev)
698 base = G_000004_MC_FB_START(base) << 16; 698 base = G_000004_MC_FB_START(base) << 16;
699 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 699 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
700 radeon_vram_location(rdev, &rdev->mc, base); 700 radeon_vram_location(rdev, &rdev->mc, base);
701 rdev->mc.gtt_base_align = 0;
701 radeon_gtt_location(rdev, &rdev->mc); 702 radeon_gtt_location(rdev, &rdev->mc);
702 radeon_update_bandwidth_info(rdev); 703 radeon_update_bandwidth_info(rdev);
703} 704}
@@ -812,6 +813,13 @@ static int rs600_startup(struct radeon_device *rdev)
812 dev_err(rdev->dev, "failled initializing IB (%d).\n", r); 813 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
813 return r; 814 return r;
814 } 815 }
816
817 r = r600_audio_init(rdev);
818 if (r) {
819 dev_err(rdev->dev, "failed initializing audio\n");
820 return r;
821 }
822
815 return 0; 823 return 0;
816} 824}
817 825
@@ -838,6 +846,7 @@ int rs600_resume(struct radeon_device *rdev)
838 846
839int rs600_suspend(struct radeon_device *rdev) 847int rs600_suspend(struct radeon_device *rdev)
840{ 848{
849 r600_audio_fini(rdev);
841 r100_cp_disable(rdev); 850 r100_cp_disable(rdev);
842 r100_wb_disable(rdev); 851 r100_wb_disable(rdev);
843 rs600_irq_disable(rdev); 852 rs600_irq_disable(rdev);
@@ -847,6 +856,7 @@ int rs600_suspend(struct radeon_device *rdev)
847 856
848void rs600_fini(struct radeon_device *rdev) 857void rs600_fini(struct radeon_device *rdev)
849{ 858{
859 r600_audio_fini(rdev);
850 r100_cp_fini(rdev); 860 r100_cp_fini(rdev);
851 r100_wb_fini(rdev); 861 r100_wb_fini(rdev);
852 r100_ib_fini(rdev); 862 r100_ib_fini(rdev);
@@ -870,6 +880,8 @@ int rs600_init(struct radeon_device *rdev)
870 radeon_scratch_init(rdev); 880 radeon_scratch_init(rdev);
871 /* Initialize surface registers */ 881 /* Initialize surface registers */
872 radeon_surface_init(rdev); 882 radeon_surface_init(rdev);
883 /* restore some register to sane defaults */
884 r100_restore_sanity(rdev);
873 /* BIOS */ 885 /* BIOS */
874 if (!radeon_get_bios(rdev)) { 886 if (!radeon_get_bios(rdev)) {
875 if (ASIC_IS_AVIVO(rdev)) 887 if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 5fea094ed8cb..f3a8c9344c64 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,7 @@ void rs690_mc_init(struct radeon_device *rdev)
162 rs690_pm_info(rdev); 162 rs690_pm_info(rdev);
163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
164 radeon_vram_location(rdev, &rdev->mc, base); 164 radeon_vram_location(rdev, &rdev->mc, base);
165 rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
165 radeon_gtt_location(rdev, &rdev->mc); 166 radeon_gtt_location(rdev, &rdev->mc);
166 radeon_update_bandwidth_info(rdev); 167 radeon_update_bandwidth_info(rdev);
167} 168}
@@ -640,6 +641,13 @@ static int rs690_startup(struct radeon_device *rdev)
640 dev_err(rdev->dev, "failled initializing IB (%d).\n", r); 641 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
641 return r; 642 return r;
642 } 643 }
644
645 r = r600_audio_init(rdev);
646 if (r) {
647 dev_err(rdev->dev, "failed initializing audio\n");
648 return r;
649 }
650
643 return 0; 651 return 0;
644} 652}
645 653
@@ -666,6 +674,7 @@ int rs690_resume(struct radeon_device *rdev)
666 674
667int rs690_suspend(struct radeon_device *rdev) 675int rs690_suspend(struct radeon_device *rdev)
668{ 676{
677 r600_audio_fini(rdev);
669 r100_cp_disable(rdev); 678 r100_cp_disable(rdev);
670 r100_wb_disable(rdev); 679 r100_wb_disable(rdev);
671 rs600_irq_disable(rdev); 680 rs600_irq_disable(rdev);
@@ -675,6 +684,7 @@ int rs690_suspend(struct radeon_device *rdev)
675 684
676void rs690_fini(struct radeon_device *rdev) 685void rs690_fini(struct radeon_device *rdev)
677{ 686{
687 r600_audio_fini(rdev);
678 r100_cp_fini(rdev); 688 r100_cp_fini(rdev);
679 r100_wb_fini(rdev); 689 r100_wb_fini(rdev);
680 r100_ib_fini(rdev); 690 r100_ib_fini(rdev);
@@ -698,6 +708,8 @@ int rs690_init(struct radeon_device *rdev)
698 radeon_scratch_init(rdev); 708 radeon_scratch_init(rdev);
699 /* Initialize surface registers */ 709 /* Initialize surface registers */
700 radeon_surface_init(rdev); 710 radeon_surface_init(rdev);
711 /* restore some register to sane defaults */
712 r100_restore_sanity(rdev);
701 /* TODO: disable VGA need to use VGA request */ 713 /* TODO: disable VGA need to use VGA request */
702 /* BIOS*/ 714 /* BIOS*/
703 if (!radeon_get_bios(rdev)) { 715 if (!radeon_get_bios(rdev)) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7d9a7b0a180a..b951b8790175 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -195,6 +195,7 @@ void rv515_mc_init(struct radeon_device *rdev)
195 rv515_vram_get_type(rdev); 195 rv515_vram_get_type(rdev);
196 r100_vram_init_sizes(rdev); 196 r100_vram_init_sizes(rdev);
197 radeon_vram_location(rdev, &rdev->mc, 0); 197 radeon_vram_location(rdev, &rdev->mc, 0);
198 rdev->mc.gtt_base_align = 0;
198 if (!(rdev->flags & RADEON_IS_AGP)) 199 if (!(rdev->flags & RADEON_IS_AGP))
199 radeon_gtt_location(rdev, &rdev->mc); 200 radeon_gtt_location(rdev, &rdev->mc);
200 radeon_update_bandwidth_info(rdev); 201 radeon_update_bandwidth_info(rdev);
@@ -468,6 +469,8 @@ int rv515_init(struct radeon_device *rdev)
468 /* Initialize surface registers */ 469 /* Initialize surface registers */
469 radeon_surface_init(rdev); 470 radeon_surface_init(rdev);
470 /* TODO: disable VGA need to use VGA request */ 471 /* TODO: disable VGA need to use VGA request */
472 /* restore some register to sane defaults */
473 r100_restore_sanity(rdev);
471 /* BIOS*/ 474 /* BIOS*/
472 if (!radeon_get_bios(rdev)) { 475 if (!radeon_get_bios(rdev)) {
473 if (ASIC_IS_AVIVO(rdev)) 476 if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6a7bf1091971..f1c796810117 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,21 @@
42static void rv770_gpu_init(struct radeon_device *rdev); 42static void rv770_gpu_init(struct radeon_device *rdev);
43void rv770_fini(struct radeon_device *rdev); 43void rv770_fini(struct radeon_device *rdev);
44 44
45/* get temperature in millidegrees */
46u32 rv770_get_temp(struct radeon_device *rdev)
47{
48 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
49 ASIC_T_SHIFT;
50 u32 actual_temp = 0;
51
52 if ((temp >> 9) & 1)
53 actual_temp = 0;
54 else
55 actual_temp = (temp >> 1) & 0xff;
56
57 return actual_temp * 1000;
58}
59
45void rv770_pm_misc(struct radeon_device *rdev) 60void rv770_pm_misc(struct radeon_device *rdev)
46{ 61{
47 int req_ps_idx = rdev->pm.requested_power_state_index; 62 int req_ps_idx = rdev->pm.requested_power_state_index;
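
rv770_get_temp() extracts the ASIC_T field from CG_MULT_THERMAL_STATUS (bits 25:16, per the rv770d.h defines below), treats bit 9 of that field as an invalid/negative indicator, keeps bits 8:1 as whole degrees Celsius, and scales to millidegrees for hwmon. The same decode applied to a sample value (standalone C; the sample register value is invented):

#include <stdint.h>
#include <stdio.h>

#define ASIC_T_MASK  0x3FF0000u
#define ASIC_T_SHIFT 16

static uint32_t decode_temp_millideg(uint32_t status)
{
    uint32_t temp = (status & ASIC_T_MASK) >> ASIC_T_SHIFT;
    uint32_t actual_temp;

    if ((temp >> 9) & 1)                    /* out-of-range/negative reading */
        actual_temp = 0;
    else
        actual_temp = (temp >> 1) & 0xff;   /* whole degrees Celsius */

    return actual_temp * 1000;              /* hwmon wants millidegrees */
}

int main(void)
{
    uint32_t sample = 0x72u << ASIC_T_SHIFT;    /* field = 0x72 -> 57 C */

    printf("%u mC\n", (unsigned)decode_temp_millideg(sample));
    return 0;
}
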
@@ -189,7 +204,10 @@ static void rv770_mc_program(struct radeon_device *rdev)
189 WREG32((0x2c20 + j), 0x00000000); 204 WREG32((0x2c20 + j), 0x00000000);
190 WREG32((0x2c24 + j), 0x00000000); 205 WREG32((0x2c24 + j), 0x00000000);
191 } 206 }
192 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 207 /* r7xx hw bug. Read from HDP_DEBUG1 rather
208 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
209 */
210 tmp = RREG32(HDP_DEBUG1);
193 211
194 rv515_mc_stop(rdev, &save); 212 rv515_mc_stop(rdev, &save);
195 if (r600_mc_wait_for_idle(rdev)) { 213 if (r600_mc_wait_for_idle(rdev)) {
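
On r7xx the HDP register cache is flushed by reading HDP_DEBUG1 instead of writing HDP_REG_COHERENCY_FLUSH_CNTL, so the write above is replaced by a read whose result is discarded. For such a read to work as a side-effecting flush it must not be optimised away, which the volatile MMIO accessor guarantees; the bare pattern, illustrative only and not the driver's RREG32 macro, is:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_hdp_debug1 = 0xdeadbeef;

/* Force a read purely for its side effect: a volatile access may not be elided. */
static inline void flush_by_read(volatile uint32_t *reg)
{
    (void)*reg;
}

int main(void)
{
    flush_by_read(&fake_hdp_debug1);
    puts("flush read issued");
    return 0;
}
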
@@ -659,8 +677,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
659 r600_count_pipe_bits((cc_rb_backend_disable & 677 r600_count_pipe_bits((cc_rb_backend_disable &
660 R7XX_MAX_BACKENDS_MASK) >> 16)), 678 R7XX_MAX_BACKENDS_MASK) >> 16)),
661 (cc_rb_backend_disable >> 16)); 679 (cc_rb_backend_disable >> 16));
662 gb_tiling_config |= BACKEND_MAP(backend_map);
663 680
681 rdev->config.rv770.tile_config = gb_tiling_config;
682 gb_tiling_config |= BACKEND_MAP(backend_map);
664 683
665 WREG32(GB_TILING_CONFIG, gb_tiling_config); 684 WREG32(GB_TILING_CONFIG, gb_tiling_config);
666 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 685 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
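
gb_tiling_config is now copied into rdev->config.rv770.tile_config before the BACKEND_MAP bits are OR'd in, so the value returned by the new tiling-config query describes pipes/banks/group size without the backend remap field. The ordering matters because the OR is destructive; a minimal illustration with invented field positions:

#include <stdint.h>
#include <stdio.h>

#define PIPE_TILING(x)  ((x) << 1)     /* invented field positions */
#define BACKEND_MAP(x)  ((x) << 16)

int main(void)
{
    uint32_t gb_tiling_config = PIPE_TILING(2) | 0x1;

    uint32_t exported = gb_tiling_config;      /* what the query will report */
    gb_tiling_config |= BACKEND_MAP(0x66);     /* hardware-only detail */

    printf("register value: 0x%08x\n", (unsigned)gb_tiling_config);
    printf("query value:    0x%08x\n", (unsigned)exported);
    return 0;
}
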
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9506f8cb99e0..b7a5a20e81dc 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -122,12 +122,18 @@
122#define GUI_ACTIVE (1<<31) 122#define GUI_ACTIVE (1<<31)
123#define GRBM_STATUS2 0x8014 123#define GRBM_STATUS2 0x8014
124 124
125#define CG_MULT_THERMAL_STATUS 0x740
126#define ASIC_T(x) ((x) << 16)
127#define ASIC_T_MASK 0x3FF0000
128#define ASIC_T_SHIFT 16
129
125#define HDP_HOST_PATH_CNTL 0x2C00 130#define HDP_HOST_PATH_CNTL 0x2C00
126#define HDP_NONSURFACE_BASE 0x2C04 131#define HDP_NONSURFACE_BASE 0x2C04
127#define HDP_NONSURFACE_INFO 0x2C08 132#define HDP_NONSURFACE_INFO 0x2C08
128#define HDP_NONSURFACE_SIZE 0x2C0C 133#define HDP_NONSURFACE_SIZE 0x2C0C
129#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 134#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
130#define HDP_TILING_CONFIG 0x2F3C 135#define HDP_TILING_CONFIG 0x2F3C
136#define HDP_DEBUG1 0x2F34
131 137
132#define MC_SHARED_CHMAP 0x2004 138#define MC_SHARED_CHMAP 0x2004
133#define NOOFCHAN_SHIFT 12 139#define NOOFCHAN_SHIFT 12
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4fd1f067d380..776bf9e9ea1a 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,8 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
47 dev->dev_private = (void *)dev_priv; 47 dev->dev_private = (void *)dev_priv;
48 dev_priv->chipset = chipset; 48 dev_priv->chipset = chipset;
49 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 49 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
50 if (ret) { 50 if (ret)
51 kfree(dev_priv); 51 kfree(dev_priv);
52 }
53 52
54 return ret; 53 return ret;
55} 54}
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index af22111397d8..07d0f2979cac 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -78,7 +78,7 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
78#else /* CONFIG_FB_SIS[_MODULE] */ 78#else /* CONFIG_FB_SIS[_MODULE] */
79 79
80#define SIS_MM_ALIGN_SHIFT 4 80#define SIS_MM_ALIGN_SHIFT 4
81#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) 81#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1)
82 82
83#endif /* CONFIG_FB_SIS[_MODULE] */ 83#endif /* CONFIG_FB_SIS[_MODULE] */
84 84
@@ -225,9 +225,8 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev)
225 map = entry->map; 225 map = entry->map;
226 if (!map) 226 if (!map)
227 continue; 227 continue;
228 if (map->type == _DRM_REGISTERS) { 228 if (map->type == _DRM_REGISTERS)
229 return map; 229 return map;
230 }
231 } 230 }
232 return NULL; 231 return NULL;
233} 232}
@@ -264,10 +263,10 @@ int sis_idle(struct drm_device *dev)
264 263
265 end = jiffies + (DRM_HZ * 3); 264 end = jiffies + (DRM_HZ * 3);
266 265
267 for (i=0; i<4; ++i) { 266 for (i = 0; i < 4; ++i) {
268 do { 267 do {
269 idle_reg = SIS_READ(0x85cc); 268 idle_reg = SIS_READ(0x85cc);
270 } while ( !time_after_eq(jiffies, end) && 269 } while (!time_after_eq(jiffies, end) &&
271 ((idle_reg & 0x80000000) != 0x80000000)); 270 ((idle_reg & 0x80000000) != 0x80000000));
272 } 271 }
273 272
@@ -301,7 +300,7 @@ void sis_lastclose(struct drm_device *dev)
301 mutex_unlock(&dev->struct_mutex); 300 mutex_unlock(&dev->struct_mutex);
302} 301}
303 302
304void sis_reclaim_buffers_locked(struct drm_device * dev, 303void sis_reclaim_buffers_locked(struct drm_device *dev,
305 struct drm_file *file_priv) 304 struct drm_file *file_priv)
306{ 305{
307 drm_sis_private_t *dev_priv = dev->dev_private; 306 drm_sis_private_t *dev_priv = dev->dev_private;
@@ -312,9 +311,8 @@ void sis_reclaim_buffers_locked(struct drm_device * dev,
312 return; 311 return;
313 } 312 }
314 313
315 if (dev->driver->dma_quiescent) { 314 if (dev->driver->dma_quiescent)
316 dev->driver->dma_quiescent(dev); 315 dev->driver->dma_quiescent(dev);
317 }
318 316
319 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); 317 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
320 mutex_unlock(&dev->struct_mutex); 318 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1d67dc973dc..ca904799f018 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -40,11 +40,13 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41 41
42#include <asm/atomic.h> 42#include <asm/atomic.h>
43#include <asm/agp.h>
44 43
45#include "ttm/ttm_bo_driver.h" 44#include "ttm/ttm_bo_driver.h"
46#include "ttm/ttm_page_alloc.h" 45#include "ttm/ttm_page_alloc.h"
47 46
47#ifdef TTM_HAS_AGP
48#include <asm/agp.h>
49#endif
48 50
49#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) 51#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
50#define SMALL_ALLOCATION 16 52#define SMALL_ALLOCATION 16
@@ -392,7 +394,7 @@ static int ttm_pool_get_num_unused_pages(void)
392/** 394/**
393 * Callback for mm to request pool to reduce number of page held. 395 * Callback for mm to request pool to reduce number of page held.
394 */ 396 */
395static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) 397static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
396{ 398{
397 static atomic_t start_pool = ATOMIC_INIT(0); 399 static atomic_t start_pool = ATOMIC_INIT(0);
398 unsigned i; 400 unsigned i;
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index bfb92d283260..68dda74a50ae 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -58,28 +58,29 @@
58 *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \ 58 *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
59 *((uint32_t *)(vb) + 1) = (nData); \ 59 *((uint32_t *)(vb) + 1) = (nData); \
60 vb = ((uint32_t *)vb) + 2; \ 60 vb = ((uint32_t *)vb) + 2; \
61 dev_priv->dma_low +=8; \ 61 dev_priv->dma_low += 8; \
62} 62}
63 63
64#define via_flush_write_combine() DRM_MEMORYBARRIER() 64#define via_flush_write_combine() DRM_MEMORYBARRIER()
65 65
66#define VIA_OUT_RING_QW(w1,w2) \ 66#define VIA_OUT_RING_QW(w1, w2) do { \
67 *vb++ = (w1); \ 67 *vb++ = (w1); \
68 *vb++ = (w2); \ 68 *vb++ = (w2); \
69 dev_priv->dma_low += 8; 69 dev_priv->dma_low += 8; \
70} while (0)
70 71
71static void via_cmdbuf_start(drm_via_private_t * dev_priv); 72static void via_cmdbuf_start(drm_via_private_t *dev_priv);
72static void via_cmdbuf_pause(drm_via_private_t * dev_priv); 73static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
73static void via_cmdbuf_reset(drm_via_private_t * dev_priv); 74static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
74static void via_cmdbuf_rewind(drm_via_private_t * dev_priv); 75static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
75static int via_wait_idle(drm_via_private_t * dev_priv); 76static int via_wait_idle(drm_via_private_t *dev_priv);
76static void via_pad_cache(drm_via_private_t * dev_priv, int qwords); 77static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
77 78
78/* 79/*
79 * Free space in command buffer. 80 * Free space in command buffer.
80 */ 81 */
81 82
82static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv) 83static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
83{ 84{
84 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; 85 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
85 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; 86 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
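
VIA_OUT_RING_QW gains the usual do { ... } while (0) wrapper so the two-statement macro behaves as a single statement; without it, using the macro as the body of an un-braced if silently detaches the second statement from the conditional. A standalone demonstration of the failure mode:

#include <stdio.h>

/* Broken: two statements, no wrapper. */
#define EMIT_BAD(a, b)  \
    printf("emit %d\n", (a)); \
    printf("emit %d\n", (b));

/* Fixed: behaves as one statement wherever a statement is expected. */
#define EMIT_GOOD(a, b) do { \
    printf("emit %d\n", (a)); \
    printf("emit %d\n", (b)); \
} while (0)

int main(void)
{
    int ring_ready = 0;

    if (ring_ready)
        EMIT_BAD(1, 2);     /* second printf escapes the if: "emit 2" still runs */

    if (ring_ready)
        EMIT_GOOD(3, 4);    /* nothing runs, as intended */

    return 0;
}
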
@@ -93,7 +94,7 @@ static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
93 * How much does the command regulator lag behind? 94 * How much does the command regulator lag behind?
94 */ 95 */
95 96
96static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv) 97static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
97{ 98{
98 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; 99 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
99 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; 100 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
@@ -108,7 +109,7 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
108 */ 109 */
109 110
110static inline int 111static inline int
111via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size) 112via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
112{ 113{
113 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; 114 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
114 uint32_t cur_addr, hw_addr, next_addr; 115 uint32_t cur_addr, hw_addr, next_addr;
@@ -146,14 +147,13 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
146 dev_priv->dma_high) { 147 dev_priv->dma_high) {
147 via_cmdbuf_rewind(dev_priv); 148 via_cmdbuf_rewind(dev_priv);
148 } 149 }
149 if (via_cmdbuf_wait(dev_priv, size) != 0) { 150 if (via_cmdbuf_wait(dev_priv, size) != 0)
150 return NULL; 151 return NULL;
151 }
152 152
153 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); 153 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
154} 154}
155 155
156int via_dma_cleanup(struct drm_device * dev) 156int via_dma_cleanup(struct drm_device *dev)
157{ 157{
158 if (dev->dev_private) { 158 if (dev->dev_private) {
159 drm_via_private_t *dev_priv = 159 drm_via_private_t *dev_priv =
@@ -171,9 +171,9 @@ int via_dma_cleanup(struct drm_device * dev)
171 return 0; 171 return 0;
172} 172}
173 173
174static int via_initialize(struct drm_device * dev, 174static int via_initialize(struct drm_device *dev,
175 drm_via_private_t * dev_priv, 175 drm_via_private_t *dev_priv,
176 drm_via_dma_init_t * init) 176 drm_via_dma_init_t *init)
177{ 177{
178 if (!dev_priv || !dev_priv->mmio) { 178 if (!dev_priv || !dev_priv->mmio) {
179 DRM_ERROR("via_dma_init called before via_map_init\n"); 179 DRM_ERROR("via_dma_init called before via_map_init\n");
@@ -258,7 +258,7 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
258 return retcode; 258 return retcode;
259} 259}
260 260
261static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) 261static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
262{ 262{
263 drm_via_private_t *dev_priv; 263 drm_via_private_t *dev_priv;
264 uint32_t *vb; 264 uint32_t *vb;
@@ -271,9 +271,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
271 return -EFAULT; 271 return -EFAULT;
272 } 272 }
273 273
274 if (cmd->size > VIA_PCI_BUF_SIZE) { 274 if (cmd->size > VIA_PCI_BUF_SIZE)
275 return -ENOMEM; 275 return -ENOMEM;
276 }
277 276
278 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 277 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
279 return -EFAULT; 278 return -EFAULT;
@@ -291,9 +290,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
291 } 290 }
292 291
293 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); 292 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
294 if (vb == NULL) { 293 if (vb == NULL)
295 return -EAGAIN; 294 return -EAGAIN;
296 }
297 295
298 memcpy(vb, dev_priv->pci_buf, cmd->size); 296 memcpy(vb, dev_priv->pci_buf, cmd->size);
299 297
@@ -311,13 +309,12 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
311 return 0; 309 return 0;
312} 310}
313 311
314int via_driver_dma_quiescent(struct drm_device * dev) 312int via_driver_dma_quiescent(struct drm_device *dev)
315{ 313{
316 drm_via_private_t *dev_priv = dev->dev_private; 314 drm_via_private_t *dev_priv = dev->dev_private;
317 315
318 if (!via_wait_idle(dev_priv)) { 316 if (!via_wait_idle(dev_priv))
319 return -EBUSY; 317 return -EBUSY;
320 }
321 return 0; 318 return 0;
322} 319}
323 320
@@ -339,22 +336,17 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
339 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size); 336 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
340 337
341 ret = via_dispatch_cmdbuffer(dev, cmdbuf); 338 ret = via_dispatch_cmdbuffer(dev, cmdbuf);
342 if (ret) { 339 return ret;
343 return ret;
344 }
345
346 return 0;
347} 340}
348 341
349static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, 342static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
350 drm_via_cmdbuffer_t * cmd) 343 drm_via_cmdbuffer_t *cmd)
351{ 344{
352 drm_via_private_t *dev_priv = dev->dev_private; 345 drm_via_private_t *dev_priv = dev->dev_private;
353 int ret; 346 int ret;
354 347
355 if (cmd->size > VIA_PCI_BUF_SIZE) { 348 if (cmd->size > VIA_PCI_BUF_SIZE)
356 return -ENOMEM; 349 return -ENOMEM;
357 }
358 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 350 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
359 return -EFAULT; 351 return -EFAULT;
360 352
@@ -380,19 +372,14 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
380 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size); 372 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
381 373
382 ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf); 374 ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
383 if (ret) { 375 return ret;
384 return ret;
385 }
386
387 return 0;
388} 376}
389 377
390static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv, 378static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
391 uint32_t * vb, int qw_count) 379 uint32_t * vb, int qw_count)
392{ 380{
393 for (; qw_count > 0; --qw_count) { 381 for (; qw_count > 0; --qw_count)
394 VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY); 382 VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
395 }
396 return vb; 383 return vb;
397} 384}
398 385
@@ -401,7 +388,7 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
401 * 388 *
402 * Returns virtual pointer to ring buffer. 389 * Returns virtual pointer to ring buffer.
403 */ 390 */
404static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv) 391static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
405{ 392{
406 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); 393 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
407} 394}
@@ -411,18 +398,18 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
411 * modifying the pause address stored in the buffer itself. If 398 * modifying the pause address stored in the buffer itself. If
412 * the regulator has already paused, restart it. 399 * the regulator has already paused, restart it.
413 */ 400 */
414static int via_hook_segment(drm_via_private_t * dev_priv, 401static int via_hook_segment(drm_via_private_t *dev_priv,
415 uint32_t pause_addr_hi, uint32_t pause_addr_lo, 402 uint32_t pause_addr_hi, uint32_t pause_addr_lo,
416 int no_pci_fire) 403 int no_pci_fire)
417{ 404{
418 int paused, count; 405 int paused, count;
419 volatile uint32_t *paused_at = dev_priv->last_pause_ptr; 406 volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
420 uint32_t reader,ptr; 407 uint32_t reader, ptr;
421 uint32_t diff; 408 uint32_t diff;
422 409
423 paused = 0; 410 paused = 0;
424 via_flush_write_combine(); 411 via_flush_write_combine();
425 (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1); 412 (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
426 413
427 *paused_at = pause_addr_lo; 414 *paused_at = pause_addr_lo;
428 via_flush_write_combine(); 415 via_flush_write_combine();
@@ -435,7 +422,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
435 dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1; 422 dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
436 423
437 /* 424 /*
438 * If there is a possibility that the command reader will 425 * If there is a possibility that the command reader will
439 * miss the new pause address and pause on the old one, 426 * miss the new pause address and pause on the old one,
440 * In that case we need to program the new start address 427 * In that case we need to program the new start address
441 * using PCI. 428 * using PCI.
@@ -443,9 +430,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
443 430
444 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; 431 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
445 count = 10000000; 432 count = 10000000;
446 while(diff == 0 && count--) { 433 while (diff == 0 && count--) {
447 paused = (VIA_READ(0x41c) & 0x80000000); 434 paused = (VIA_READ(0x41c) & 0x80000000);
448 if (paused) 435 if (paused)
449 break; 436 break;
450 reader = *(dev_priv->hw_addr_ptr); 437 reader = *(dev_priv->hw_addr_ptr);
451 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; 438 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
@@ -477,7 +464,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
477 return paused; 464 return paused;
478} 465}
479 466
480static int via_wait_idle(drm_via_private_t * dev_priv) 467static int via_wait_idle(drm_via_private_t *dev_priv)
481{ 468{
482 int count = 10000000; 469 int count = 10000000;
483 470
@@ -491,9 +478,9 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
491 return count; 478 return count;
492} 479}
493 480
494static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, 481static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
495 uint32_t addr, uint32_t * cmd_addr_hi, 482 uint32_t addr, uint32_t *cmd_addr_hi,
496 uint32_t * cmd_addr_lo, int skip_wait) 483 uint32_t *cmd_addr_lo, int skip_wait)
497{ 484{
498 uint32_t agp_base; 485 uint32_t agp_base;
499 uint32_t cmd_addr, addr_lo, addr_hi; 486 uint32_t cmd_addr, addr_lo, addr_hi;
@@ -521,7 +508,7 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
521 return vb; 508 return vb;
522} 509}
523 510
524static void via_cmdbuf_start(drm_via_private_t * dev_priv) 511static void via_cmdbuf_start(drm_via_private_t *dev_priv)
525{ 512{
526 uint32_t pause_addr_lo, pause_addr_hi; 513 uint32_t pause_addr_lo, pause_addr_hi;
527 uint32_t start_addr, start_addr_lo; 514 uint32_t start_addr, start_addr_lo;
@@ -580,7 +567,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
580 dev_priv->dma_diff = ptr - reader; 567 dev_priv->dma_diff = ptr - reader;
581} 568}
582 569
583static void via_pad_cache(drm_via_private_t * dev_priv, int qwords) 570static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
584{ 571{
585 uint32_t *vb; 572 uint32_t *vb;
586 573
@@ -590,7 +577,7 @@ static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
590 via_align_buffer(dev_priv, vb, qwords); 577 via_align_buffer(dev_priv, vb, qwords);
591} 578}
592 579
593static inline void via_dummy_bitblt(drm_via_private_t * dev_priv) 580static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
594{ 581{
595 uint32_t *vb = via_get_dma(dev_priv); 582 uint32_t *vb = via_get_dma(dev_priv);
596 SetReg2DAGP(0x0C, (0 | (0 << 16))); 583 SetReg2DAGP(0x0C, (0 | (0 << 16)));
@@ -598,7 +585,7 @@ static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
598 SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000); 585 SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
599} 586}
600 587
601static void via_cmdbuf_jump(drm_via_private_t * dev_priv) 588static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
602{ 589{
603 uint32_t agp_base; 590 uint32_t agp_base;
604 uint32_t pause_addr_lo, pause_addr_hi; 591 uint32_t pause_addr_lo, pause_addr_hi;
@@ -617,9 +604,8 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
617 */ 604 */
618 605
619 dev_priv->dma_low = 0; 606 dev_priv->dma_low = 0;
620 if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) { 607 if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
621 DRM_ERROR("via_cmdbuf_jump failed\n"); 608 DRM_ERROR("via_cmdbuf_jump failed\n");
622 }
623 609
624 via_dummy_bitblt(dev_priv); 610 via_dummy_bitblt(dev_priv);
625 via_dummy_bitblt(dev_priv); 611 via_dummy_bitblt(dev_priv);
@@ -657,12 +643,12 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
657} 643}
658 644
659 645
660static void via_cmdbuf_rewind(drm_via_private_t * dev_priv) 646static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
661{ 647{
662 via_cmdbuf_jump(dev_priv); 648 via_cmdbuf_jump(dev_priv);
663} 649}
664 650
665static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type) 651static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
666{ 652{
667 uint32_t pause_addr_lo, pause_addr_hi; 653 uint32_t pause_addr_lo, pause_addr_hi;
668 654
@@ -670,12 +656,12 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
670 via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0); 656 via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
671} 657}
672 658
673static void via_cmdbuf_pause(drm_via_private_t * dev_priv) 659static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
674{ 660{
675 via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE); 661 via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
676} 662}
677 663
678static void via_cmdbuf_reset(drm_via_private_t * dev_priv) 664static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
679{ 665{
680 via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP); 666 via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
681 via_wait_idle(dev_priv); 667 via_wait_idle(dev_priv);
@@ -708,9 +694,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
708 case VIA_CMDBUF_SPACE: 694 case VIA_CMDBUF_SPACE:
709 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) 695 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
710 && --count) { 696 && --count) {
711 if (!d_siz->wait) { 697 if (!d_siz->wait)
712 break; 698 break;
713 }
714 } 699 }
715 if (!count) { 700 if (!count) {
716 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); 701 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
@@ -720,9 +705,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
720 case VIA_CMDBUF_LAG: 705 case VIA_CMDBUF_LAG:
721 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size) 706 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
722 && --count) { 707 && --count) {
723 if (!d_siz->wait) { 708 if (!d_siz->wait)
724 break; 709 break;
725 }
726 } 710 }
727 if (!count) { 711 if (!count) {
728 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); 712 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 4c54f043068e..9b5b4d9dd62c 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -70,7 +70,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
70 descriptor_this_page; 70 descriptor_this_page;
71 dma_addr_t next = vsg->chain_start; 71 dma_addr_t next = vsg->chain_start;
72 72
73 while(num_desc--) { 73 while (num_desc--) {
74 if (descriptor_this_page-- == 0) { 74 if (descriptor_this_page-- == 0) {
75 cur_descriptor_page--; 75 cur_descriptor_page--;
76 descriptor_this_page = vsg->descriptors_per_page - 1; 76 descriptor_this_page = vsg->descriptors_per_page - 1;
@@ -174,19 +174,19 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
174 struct page *page; 174 struct page *page;
175 int i; 175 int i;
176 176
177 switch(vsg->state) { 177 switch (vsg->state) {
178 case dr_via_device_mapped: 178 case dr_via_device_mapped:
179 via_unmap_blit_from_device(pdev, vsg); 179 via_unmap_blit_from_device(pdev, vsg);
180 case dr_via_desc_pages_alloc: 180 case dr_via_desc_pages_alloc:
181 for (i=0; i<vsg->num_desc_pages; ++i) { 181 for (i = 0; i < vsg->num_desc_pages; ++i) {
182 if (vsg->desc_pages[i] != NULL) 182 if (vsg->desc_pages[i] != NULL)
183 free_page((unsigned long)vsg->desc_pages[i]); 183 free_page((unsigned long)vsg->desc_pages[i]);
184 } 184 }
185 kfree(vsg->desc_pages); 185 kfree(vsg->desc_pages);
186 case dr_via_pages_locked: 186 case dr_via_pages_locked:
187 for (i=0; i<vsg->num_pages; ++i) { 187 for (i = 0; i < vsg->num_pages; ++i) {
188 if ( NULL != (page = vsg->pages[i])) { 188 if (NULL != (page = vsg->pages[i])) {
189 if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
190 SetPageDirty(page); 190 SetPageDirty(page);
191 page_cache_release(page); 191 page_cache_release(page);
192 } 192 }
@@ -232,7 +232,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
232{ 232{
233 int ret; 233 int ret;
234 unsigned long first_pfn = VIA_PFN(xfer->mem_addr); 234 unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - 235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
236 first_pfn + 1; 236 first_pfn + 1;
237 237
238 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 238 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
@@ -268,7 +268,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
268{ 268{
269 int i; 269 int i;
270 270
271 vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); 271 vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
272 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / 272 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
273 vsg->descriptors_per_page; 273 vsg->descriptors_per_page;
274 274
@@ -276,7 +276,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
276 return -ENOMEM; 276 return -ENOMEM;
277 277
278 vsg->state = dr_via_desc_pages_alloc; 278 vsg->state = dr_via_desc_pages_alloc;
279 for (i=0; i<vsg->num_desc_pages; ++i) { 279 for (i = 0; i < vsg->num_desc_pages; ++i) {
280 if (NULL == (vsg->desc_pages[i] = 280 if (NULL == (vsg->desc_pages[i] =
281 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) 281 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
282 return -ENOMEM; 282 return -ENOMEM;
@@ -318,21 +318,20 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
318 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; 318 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
319 int cur; 319 int cur;
320 int done_transfer; 320 int done_transfer;
321 unsigned long irqsave=0; 321 unsigned long irqsave = 0;
322 uint32_t status = 0; 322 uint32_t status = 0;
323 323
324 DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n", 324 DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
325 engine, from_irq, (unsigned long) blitq); 325 engine, from_irq, (unsigned long) blitq);
326 326
327 if (from_irq) { 327 if (from_irq)
328 spin_lock(&blitq->blit_lock); 328 spin_lock(&blitq->blit_lock);
329 } else { 329 else
330 spin_lock_irqsave(&blitq->blit_lock, irqsave); 330 spin_lock_irqsave(&blitq->blit_lock, irqsave);
331 }
332 331
333 done_transfer = blitq->is_active && 332 done_transfer = blitq->is_active &&
334 (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); 333 ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
335 done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); 334 done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
336 335
337 cur = blitq->cur; 336 cur = blitq->cur;
338 if (done_transfer) { 337 if (done_transfer) {
@@ -377,18 +376,16 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
377 if (!timer_pending(&blitq->poll_timer)) 376 if (!timer_pending(&blitq->poll_timer))
378 mod_timer(&blitq->poll_timer, jiffies + 1); 377 mod_timer(&blitq->poll_timer, jiffies + 1);
379 } else { 378 } else {
380 if (timer_pending(&blitq->poll_timer)) { 379 if (timer_pending(&blitq->poll_timer))
381 del_timer(&blitq->poll_timer); 380 del_timer(&blitq->poll_timer);
382 }
383 via_dmablit_engine_off(dev, engine); 381 via_dmablit_engine_off(dev, engine);
384 } 382 }
385 } 383 }
386 384
387 if (from_irq) { 385 if (from_irq)
388 spin_unlock(&blitq->blit_lock); 386 spin_unlock(&blitq->blit_lock);
389 } else { 387 else
390 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); 388 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
391 }
392} 389}
393 390
394 391
@@ -414,10 +411,9 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
414 ((blitq->cur_blit_handle - handle) <= (1 << 23)); 411 ((blitq->cur_blit_handle - handle) <= (1 << 23));
415 412
416 if (queue && active) { 413 if (queue && active) {
417 slot = handle - blitq->done_blit_handle + blitq->cur -1; 414 slot = handle - blitq->done_blit_handle + blitq->cur - 1;
418 if (slot >= VIA_NUM_BLIT_SLOTS) { 415 if (slot >= VIA_NUM_BLIT_SLOTS)
419 slot -= VIA_NUM_BLIT_SLOTS; 416 slot -= VIA_NUM_BLIT_SLOTS;
420 }
421 *queue = blitq->blit_queue + slot; 417 *queue = blitq->blit_queue + slot;
422 } 418 }
423 419
@@ -506,12 +502,12 @@ via_dmablit_workqueue(struct work_struct *work)
506 int cur_released; 502 int cur_released;
507 503
508 504
509 DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) 505 DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
510 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); 506 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
511 507
512 spin_lock_irqsave(&blitq->blit_lock, irqsave); 508 spin_lock_irqsave(&blitq->blit_lock, irqsave);
513 509
514 while(blitq->serviced != blitq->cur) { 510 while (blitq->serviced != blitq->cur) {
515 511
516 cur_released = blitq->serviced++; 512 cur_released = blitq->serviced++;
517 513
@@ -545,13 +541,13 @@ via_dmablit_workqueue(struct work_struct *work)
545void 541void
546via_init_dmablit(struct drm_device *dev) 542via_init_dmablit(struct drm_device *dev)
547{ 543{
548 int i,j; 544 int i, j;
549 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 545 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
550 drm_via_blitq_t *blitq; 546 drm_via_blitq_t *blitq;
551 547
552 pci_set_master(dev->pdev); 548 pci_set_master(dev->pdev);
553 549
554 for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) { 550 for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
555 blitq = dev_priv->blit_queues + i; 551 blitq = dev_priv->blit_queues + i;
556 blitq->dev = dev; 552 blitq->dev = dev;
557 blitq->cur_blit_handle = 0; 553 blitq->cur_blit_handle = 0;
@@ -564,9 +560,8 @@ via_init_dmablit(struct drm_device *dev)
564 blitq->is_active = 0; 560 blitq->is_active = 0;
565 blitq->aborting = 0; 561 blitq->aborting = 0;
566 spin_lock_init(&blitq->blit_lock); 562 spin_lock_init(&blitq->blit_lock);
567 for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { 563 for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
568 DRM_INIT_WAITQUEUE(blitq->blit_queue + j); 564 DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
569 }
570 DRM_INIT_WAITQUEUE(&blitq->busy_queue); 565 DRM_INIT_WAITQUEUE(&blitq->busy_queue);
571 INIT_WORK(&blitq->wq, via_dmablit_workqueue); 566 INIT_WORK(&blitq->wq, via_dmablit_workqueue);
572 setup_timer(&blitq->poll_timer, via_dmablit_timer, 567 setup_timer(&blitq->poll_timer, via_dmablit_timer,
@@ -685,18 +680,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
685static int 680static int
686via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) 681via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
687{ 682{
688 int ret=0; 683 int ret = 0;
689 unsigned long irqsave; 684 unsigned long irqsave;
690 685
691 DRM_DEBUG("Num free is %d\n", blitq->num_free); 686 DRM_DEBUG("Num free is %d\n", blitq->num_free);
692 spin_lock_irqsave(&blitq->blit_lock, irqsave); 687 spin_lock_irqsave(&blitq->blit_lock, irqsave);
693 while(blitq->num_free == 0) { 688 while (blitq->num_free == 0) {
694 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); 689 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
695 690
696 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); 691 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
697 if (ret) { 692 if (ret)
698 return (-EINTR == ret) ? -EAGAIN : ret; 693 return (-EINTR == ret) ? -EAGAIN : ret;
699 }
700 694
701 spin_lock_irqsave(&blitq->blit_lock, irqsave); 695 spin_lock_irqsave(&blitq->blit_lock, irqsave);
702 } 696 }
@@ -719,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
 	spin_lock_irqsave(&blitq->blit_lock, irqsave);
 	blitq->num_free++;
 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-	DRM_WAKEUP( &blitq->busy_queue );
+	DRM_WAKEUP(&blitq->busy_queue);
 }
 
 /*
@@ -744,9 +738,8 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
 
 	engine = (xfer->to_fb) ? 0 : 1;
 	blitq = dev_priv->blit_queues + engine;
-	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
 		return ret;
-	}
 	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
 		via_dmablit_release_slot(blitq);
 		return -ENOMEM;
@@ -780,7 +773,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
  */
 
 int
-via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_blitsync_t *sync = data;
 	int err;
@@ -804,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
  */
 
 int
-via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_dmablit_t *xfer = data;
 	int err;
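
For reference on the via_dmablit.c hunks above: via_dmablit_grab_slot() treats blitq->num_free as a counting semaphore guarded by blit_lock, sleeping on busy_queue until via_dmablit_release_slot() frees a slot; an interrupted wait is surfaced as -EAGAIN so user space can simply retry the ioctl. Below is a minimal sketch of that pattern, assuming the drm_via_blitq_t fields shown in the hunks (the helper name grab_slot_sketch is illustrative, not part of the driver):

/*
 * Sketch only, not a drop-in replacement. blit_lock protects num_free,
 * DRM_WAIT_ON() sleeps until busy_queue is woken by the release path,
 * and an interrupted wait is reported as -EAGAIN so the ioctl is
 * restartable from user space.
 */
static int grab_slot_sketch(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;
	int ret = 0;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret)
			return (ret == -EINTR) ? -EAGAIN : ret;
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}
	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	return 0;
}
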
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
index 7408a547a036..9b662a327cef 100644
--- a/drivers/gpu/drm/via/via_dmablit.h
+++ b/drivers/gpu/drm/via/via_dmablit.h
@@ -45,12 +45,12 @@ typedef struct _drm_via_sg_info {
 	int num_desc;
 	enum dma_data_direction direction;
 	unsigned char *bounce_buffer;
 	dma_addr_t chain_start;
 	uint32_t free_on_sequence;
 	unsigned int descriptors_per_page;
 	int aborted;
 	enum {
 		dr_via_device_mapped,
 		dr_via_desc_pages_alloc,
 		dr_via_pages_locked,
 		dr_via_pages_alloc,
@@ -68,7 +68,7 @@ typedef struct _drm_via_blitq {
 	unsigned num_free;
 	unsigned num_outstanding;
 	unsigned long end;
 	int aborting;
 	int is_active;
 	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
 	spinlock_t blit_lock;
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index cafcb844a223..9cf87d912325 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -107,9 +107,9 @@ enum via_family {
 #define VIA_BASE ((dev_priv->mmio))
 
 #define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
-#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
+#define VIA_WRITE(reg, val) DRM_WRITE32(VIA_BASE, reg, val)
 #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
-#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
+#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
 
 extern struct drm_ioctl_desc via_ioctls[];
 extern int via_max_ioctl;
@@ -121,28 +121,28 @@ extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *fil
 extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
-extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
+extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
 
 extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
 extern int via_driver_unload(struct drm_device *dev);
 
-extern int via_init_context(struct drm_device * dev, int context);
-extern int via_final_context(struct drm_device * dev, int context);
+extern int via_init_context(struct drm_device *dev, int context);
+extern int via_final_context(struct drm_device *dev, int context);
 
-extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_do_cleanup_map(struct drm_device *dev);
 extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int via_enable_vblank(struct drm_device *dev, int crtc);
 extern void via_disable_vblank(struct drm_device *dev, int crtc);
 
 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
-extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_preinstall(struct drm_device *dev);
 extern int via_driver_irq_postinstall(struct drm_device *dev);
-extern void via_driver_irq_uninstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device *dev);
 
-extern int via_dma_cleanup(struct drm_device * dev);
+extern int via_dma_cleanup(struct drm_device *dev);
 extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(struct drm_device * dev);
+extern int via_driver_dma_quiescent(struct drm_device *dev);
 extern void via_init_futex(drm_via_private_t *dev_priv);
 extern void via_cleanup_futex(drm_via_private_t *dev_priv);
 extern void via_release_futex(drm_via_private_t *dev_priv, int context);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 34079f251cd4..d391f48ef87a 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -141,11 +141,10 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 			atomic_inc(&cur_irq->irq_received);
 			DRM_WAKEUP(&cur_irq->irq_queue);
 			handled = 1;
-			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
 				via_dmablit_handler(dev, 0, 1);
-			} else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
 				via_dmablit_handler(dev, 1, 1);
-			}
 		}
 		cur_irq++;
 	}
@@ -160,7 +159,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 	return IRQ_NONE;
 }
 
-static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
 {
 	u32 status;
 
@@ -207,7 +206,7 @@ void via_disable_vblank(struct drm_device *dev, int crtc)
 }
 
 static int
-via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
+via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
 		    unsigned int *sequence)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -260,7 +259,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
  * drm_dma.h hooks
  */
 
-void via_driver_irq_preinstall(struct drm_device * dev)
+void via_driver_irq_preinstall(struct drm_device *dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
@@ -329,7 +328,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-void via_driver_irq_uninstall(struct drm_device * dev)
+void via_driver_irq_uninstall(struct drm_device *dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6e6f91591639..6cca9a709f7a 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -25,7 +25,7 @@
 #include "via_drm.h"
 #include "via_drv.h"
 
-static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
+static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
 
@@ -68,7 +68,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
 	return 0;
 }
 
-int via_do_cleanup_map(struct drm_device * dev)
+int via_do_cleanup_map(struct drm_device *dev)
 {
 	via_dma_cleanup(dev);
 
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index f694cb5ededc..6cc2dadae3ef 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -31,7 +31,7 @@
 #include "drm_sman.h"
 
 #define VIA_MM_ALIGN_SHIFT 4
-#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
+#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
 
 int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -172,7 +172,7 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
 }
 
 
-void via_reclaim_buffers_locked(struct drm_device * dev,
+void via_reclaim_buffers_locked(struct drm_device *dev,
 				struct drm_file *file_priv)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
@@ -183,9 +183,8 @@ void via_reclaim_buffers_locked(struct drm_device * dev,
 		return;
 	}
 
-	if (dev->driver->dma_quiescent) {
+	if (dev->driver->dma_quiescent)
 		dev->driver->dma_quiescent(dev);
-	}
 
 	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 46a579198747..48957b856d41 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -235,7 +235,7 @@ static hazard_t table2[256];
 static hazard_t table3[256];
 
 static __inline__ int
-eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
+eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
 {
 	if ((buf_end - *buf) >= num_words) {
 		*buf += num_words;
@@ -252,7 +252,7 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
 static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
 						unsigned long offset,
 						unsigned long size,
-						struct drm_device * dev)
+						struct drm_device *dev)
 {
 	struct drm_map_list *r_list;
 	drm_local_map_t *map = seq->map_cache;
@@ -344,7 +344,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
 }
 
 static __inline__ int
-investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
 {
 	register uint32_t tmp, *tmp_addr;
 
@@ -518,7 +518,7 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
 
 static __inline__ int
 via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
-		    drm_via_state_t * cur_seq)
+		    drm_via_state_t *cur_seq)
 {
 	drm_via_private_t *dev_priv =
 		(drm_via_private_t *) cur_seq->dev->dev_private;
@@ -621,8 +621,8 @@ via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
 }
 
 static __inline__ verifier_state_t
-via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
-		  drm_via_state_t * hc_state)
+via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
+		  drm_via_state_t *hc_state)
 {
 	uint32_t cmd;
 	int hz_mode;
@@ -706,16 +706,15 @@ via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
 			return state_error;
 		}
 	}
-	if (hc_state->unfinished && finish_current_sequence(hc_state)) {
+	if (hc_state->unfinished && finish_current_sequence(hc_state))
 		return state_error;
-	}
 	*buffer = buf;
 	return state_command;
 }
 
 static __inline__ verifier_state_t
-via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
-		  const uint32_t * buf_end, int *fire_count)
+via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		  const uint32_t *buf_end, int *fire_count)
 {
 	uint32_t cmd;
 	const uint32_t *buf = *buffer;
@@ -833,8 +832,8 @@ via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
 }
 
 static __inline__ verifier_state_t
-via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
-		  const uint32_t * buf_end)
+via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		  const uint32_t *buf_end)
 {
 	register uint32_t cmd;
 	const uint32_t *buf = *buffer;
@@ -851,7 +850,7 @@ via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
 }
 
 static __inline__ verifier_state_t
-via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
+via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
 {
 	uint32_t data;
 	const uint32_t *buf = *buffer;
@@ -884,8 +883,8 @@ via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
 }
 
 static __inline__ verifier_state_t
-via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
-		   const uint32_t * buf_end)
+via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		   const uint32_t *buf_end)
 {
 	uint32_t addr, count, i;
 	const uint32_t *buf = *buffer;
@@ -893,9 +892,8 @@ via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
 	addr = *buf++ & ~VIA_VIDEOMASK;
 	i = count = *buf;
 	buf += 3;
-	while (i--) {
+	while (i--)
 		VIA_WRITE(addr, *buf++);
-	}
 	if (count & 3)
 		buf += 4 - (count & 3);
 	*buffer = buf;
@@ -940,8 +938,8 @@ via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
 }
 
 static __inline__ verifier_state_t
-via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
-		   const uint32_t * buf_end)
+via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		   const uint32_t *buf_end)
 {
 
 	uint32_t addr, count, i;
@@ -1037,7 +1035,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
 }
 
 int
-via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
+via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
 			 unsigned int size)
 {
 
@@ -1085,9 +1083,8 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
 			return -EINVAL;
 		}
 	}
-	if (state == state_error) {
+	if (state == state_error)
 		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -1096,13 +1093,11 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
 {
 	int i;
 
-	for (i = 0; i < 256; ++i) {
+	for (i = 0; i < 256; ++i)
 		table[i] = forbidden_command;
-	}
 
-	for (i = 0; i < size; ++i) {
+	for (i = 0; i < size; ++i)
 		table[init_table[i].code] = init_table[i].hz;
-	}
 }
 
 void via_init_command_verifier(void)
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
index d6f8214b69f5..26b6d361ab95 100644
--- a/drivers/gpu/drm/via/via_verifier.h
+++ b/drivers/gpu/drm/via/via_verifier.h
@@ -54,8 +54,8 @@ typedef struct {
 	const uint32_t *buf_start;
 } drm_via_state_t;
 
-extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
-				     struct drm_device * dev, int agp);
+extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
+				     struct drm_device *dev, int agp);
 extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
 				    unsigned int size);
 
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6efac8117c93..675d311f038f 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -29,7 +29,7 @@
 #include "via_drm.h"
 #include "via_drv.h"
 
-void via_init_futex(drm_via_private_t * dev_priv)
+void via_init_futex(drm_via_private_t *dev_priv)
 {
 	unsigned int i;
 
@@ -41,11 +41,11 @@ void via_init_futex(drm_via_private_t * dev_priv)
 	}
 }
 
-void via_cleanup_futex(drm_via_private_t * dev_priv)
+void via_cleanup_futex(drm_via_private_t *dev_priv)
 {
 }
 
-void via_release_futex(drm_via_private_t * dev_priv, int context)
+void via_release_futex(drm_via_private_t *dev_priv, int context)
 {
 	unsigned int i;
 	volatile int *lock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b0866f04ec76..870967a97c15 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -528,7 +528,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	 * Dirty & Deferred IO
 	 */
 	par->dirty.x1 = par->dirty.x2 = 0;
-	par->dirty.y1 = par->dirty.y1 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
 	par->dirty.active = true;
 	spin_lock_init(&par->dirty.lock);
 	info->fbdefio = &vmw_defio;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f1d626112415..437ac786277a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -972,6 +972,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	ret = copy_from_user(rects, user_rects, rects_size);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to get rects.\n");
+		ret = -EFAULT;
 		goto out_free;
 	}
 
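
A note on the final vmwgfx_kms.c hunk: copy_from_user() returns the number of bytes it could not copy (0 on success), never a negative errno, so an ioctl handler has to translate a non-zero return into -EFAULT before taking its error path. A minimal sketch of that idiom, assuming the variable names and the out_free label used in the hunk:

	/* copy_from_user() reports "bytes not copied", not an errno. */
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;	/* convert to a proper error code for the caller */
		goto out_free;
	}
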