Diffstat (limited to 'drivers/gpu')
30 files changed, 2350 insertions, 625 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5130b72d593c..3a22eb9be378 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -70,7 +70,7 @@ config DRM_I915 | |||
70 | select FB_CFB_FILLRECT | 70 | select FB_CFB_FILLRECT |
71 | select FB_CFB_COPYAREA | 71 | select FB_CFB_COPYAREA |
72 | select FB_CFB_IMAGEBLIT | 72 | select FB_CFB_IMAGEBLIT |
73 | depends on FB | 73 | select FB |
74 | tristate "i915 driver" | 74 | tristate "i915 driver" |
75 | help | 75 | help |
76 | Choose this option if you have a system that has Intel 830M, 845G, | 76 | Choose this option if you have a system that has Intel 830M, 845G, |
@@ -80,18 +80,17 @@ config DRM_I915 | |||
80 | XFree86 4.4 and above. If unsure, build this and i830 as modules and | 80 | XFree86 4.4 and above. If unsure, build this and i830 as modules and |
81 | the X server will load the correct one. | 81 | the X server will load the correct one. |
82 | 82 | ||
83 | endchoice | ||
84 | |||
85 | config DRM_I915_KMS | 83 | config DRM_I915_KMS |
86 | bool "Enable modesetting on intel by default" | 84 | bool "Enable modesetting on intel by default" |
87 | depends on DRM_I915 | 85 | depends on DRM_I915 |
88 | help | 86 | help |
89 | Choose this option if you want kernel modesetting enabled by default, | 87 | Choose this option if you want kernel modesetting enabled by default, |
90 | and you have a new enough userspace to support this. Running old | 88 | and you have a new enough userspace to support this. Running old |
91 | userspaces with this enabled will cause pain. Note that this causes | 89 | userspaces with this enabled will cause pain. Note that this causes |
92 | the driver to bind to PCI devices, which precludes loading things | 90 | the driver to bind to PCI devices, which precludes loading things |
93 | like intelfb. | 91 | like intelfb. |
94 | 92 | ||
93 | endchoice | ||
95 | 94 | ||
96 | config DRM_MGA | 95 | config DRM_MGA |
97 | tristate "Matrox g200/g400" | 96 | tristate "Matrox g200/g400" |
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d33b8252b58..14796594e5d9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,10 +33,11 @@ | |||
33 | 33 | ||
34 | #include "drmP.h" | 34 | #include "drmP.h" |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <asm/agp.h> | ||
37 | 36 | ||
38 | #if __OS_HAS_AGP | 37 | #if __OS_HAS_AGP |
39 | 38 | ||
39 | #include <asm/agp.h> | ||
40 | |||
40 | /** | 41 | /** |
41 | * Get AGP information. | 42 | * Get AGP information. |
42 | * | 43 | * |
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 72c667f9bee1..12715d3c078d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -420,7 +420,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) | |||
420 | dev->sigdata.lock = NULL; | 420 | dev->sigdata.lock = NULL; |
421 | master->lock.hw_lock = NULL; /* SHM removed */ | 421 | master->lock.hw_lock = NULL; /* SHM removed */ |
422 | master->lock.file_priv = NULL; | 422 | master->lock.file_priv = NULL; |
423 | wake_up_interruptible(&master->lock.lock_queue); | 423 | wake_up_interruptible_all(&master->lock.lock_queue); |
424 | } | 424 | } |
425 | break; | 425 | break; |
426 | case _DRM_AGP: | 426 | case _DRM_AGP: |
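The switch to wake_up_interruptible_all() here matters because more than one client can be asleep in drm_lock() when the master's SHM lock is torn down; waking a single waiter would leave the rest sleeping with no lock holder left to wake them. A minimal sketch of the teardown side, assuming the drm_lock_data layout from drmP.h (illustrative only, not part of this commit):

#include "drmP.h"

/* Sketch only: every sleeper in drm_lock() re-checks hw_lock, so all of them
 * must be woken once the lock is gone. */
static void example_shm_lock_teardown(struct drm_lock_data *lock)
{
        lock->hw_lock = NULL;           /* drm_lock() treats this as "device gone" */
        lock->file_priv = NULL;
        wake_up_interruptible_all(&lock->lock_queue);   /* wake every sleeper, not just one */
}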
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5b2cbb778162..94a768871734 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -194,7 +194,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status) | |||
194 | * @type: object type | 194 | * @type: object type |
195 | * | 195 | * |
196 | * LOCKING: | 196 | * LOCKING: |
197 | * Caller must hold DRM mode_config lock. | ||
198 | * | 197 | * |
199 | * Create a unique identifier based on @ptr in @dev's identifier space. Used | 198 | * Create a unique identifier based on @ptr in @dev's identifier space. Used |
200 | * for tracking modes, CRTCs and connectors. | 199 | * for tracking modes, CRTCs and connectors. |
@@ -209,15 +208,15 @@ static int drm_mode_object_get(struct drm_device *dev, | |||
209 | int new_id = 0; | 208 | int new_id = 0; |
210 | int ret; | 209 | int ret; |
211 | 210 | ||
212 | WARN(!mutex_is_locked(&dev->mode_config.mutex), | ||
213 | "%s called w/o mode_config lock\n", __func__); | ||
214 | again: | 211 | again: |
215 | if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { | 212 | if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { |
216 | DRM_ERROR("Ran out memory getting a mode number\n"); | 213 | DRM_ERROR("Ran out memory getting a mode number\n"); |
217 | return -EINVAL; | 214 | return -EINVAL; |
218 | } | 215 | } |
219 | 216 | ||
217 | mutex_lock(&dev->mode_config.idr_mutex); | ||
220 | ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); | 218 | ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); |
219 | mutex_unlock(&dev->mode_config.idr_mutex); | ||
221 | if (ret == -EAGAIN) | 220 | if (ret == -EAGAIN) |
222 | goto again; | 221 | goto again; |
223 | 222 | ||
@@ -239,16 +238,20 @@ again: | |||
239 | static void drm_mode_object_put(struct drm_device *dev, | 238 | static void drm_mode_object_put(struct drm_device *dev, |
240 | struct drm_mode_object *object) | 239 | struct drm_mode_object *object) |
241 | { | 240 | { |
241 | mutex_lock(&dev->mode_config.idr_mutex); | ||
242 | idr_remove(&dev->mode_config.crtc_idr, object->id); | 242 | idr_remove(&dev->mode_config.crtc_idr, object->id); |
243 | mutex_unlock(&dev->mode_config.idr_mutex); | ||
243 | } | 244 | } |
244 | 245 | ||
245 | void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) | 246 | void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) |
246 | { | 247 | { |
247 | struct drm_mode_object *obj; | 248 | struct drm_mode_object *obj = NULL; |
248 | 249 | ||
250 | mutex_lock(&dev->mode_config.idr_mutex); | ||
249 | obj = idr_find(&dev->mode_config.crtc_idr, id); | 251 | obj = idr_find(&dev->mode_config.crtc_idr, id); |
250 | if (!obj || (obj->type != type) || (obj->id != id)) | 252 | if (!obj || (obj->type != type) || (obj->id != id)) |
251 | return NULL; | 253 | obj = NULL; |
254 | mutex_unlock(&dev->mode_config.idr_mutex); | ||
252 | 255 | ||
253 | return obj; | 256 | return obj; |
254 | } | 257 | } |
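The new idr_mutex covers both allocation and lookup of mode object IDs, so drm_mode_object_find() can no longer return a pointer to an object that is concurrently being removed from the idr. A condensed sketch of the allocation retry pattern used above, written against the pre-idr_alloc() API of this era; object_get_id() is a hypothetical helper, not something the commit adds:

/* Hypothetical helper illustrating the idr_pre_get()/idr_get_new_above() retry
 * loop above; IDs start at 1 so 0 can mean "no object". */
static int object_get_id(struct idr *idr, struct mutex *idr_mutex, void *obj)
{
        int new_id = 0;
        int ret;
again:
        if (idr_pre_get(idr, GFP_KERNEL) == 0)  /* preload idr nodes outside the lock */
                return -ENOMEM;
        mutex_lock(idr_mutex);
        ret = idr_get_new_above(idr, obj, 1, &new_id);
        mutex_unlock(idr_mutex);
        if (ret == -EAGAIN)                     /* a racer consumed the preloaded node */
                goto again;
        return ret ? ret : new_id;
}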
@@ -786,6 +789,7 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property); | |||
786 | void drm_mode_config_init(struct drm_device *dev) | 789 | void drm_mode_config_init(struct drm_device *dev) |
787 | { | 790 | { |
788 | mutex_init(&dev->mode_config.mutex); | 791 | mutex_init(&dev->mode_config.mutex); |
792 | mutex_init(&dev->mode_config.idr_mutex); | ||
789 | INIT_LIST_HEAD(&dev->mode_config.fb_list); | 793 | INIT_LIST_HEAD(&dev->mode_config.fb_list); |
790 | INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); | 794 | INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); |
791 | INIT_LIST_HEAD(&dev->mode_config.crtc_list); | 795 | INIT_LIST_HEAD(&dev->mode_config.crtc_list); |
@@ -1737,9 +1741,8 @@ out: | |||
1737 | * RETURNS: | 1741 | * RETURNS: |
1738 | * Zero on success, errno on failure. | 1742 | * Zero on success, errno on failure. |
1739 | */ | 1743 | */ |
1740 | void drm_fb_release(struct file *filp) | 1744 | void drm_fb_release(struct drm_file *priv) |
1741 | { | 1745 | { |
1742 | struct drm_file *priv = filp->private_data; | ||
1743 | struct drm_device *dev = priv->minor->dev; | 1746 | struct drm_device *dev = priv->minor->dev; |
1744 | struct drm_framebuffer *fb, *tfb; | 1747 | struct drm_framebuffer *fb, *tfb; |
1745 | 1748 | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d8a982b71296..1c3a8c557140 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,7 +36,7 @@ | |||
36 | /* | 36 | /* |
37 | * Detailed mode info for 800x600@60Hz | 37 | * Detailed mode info for 800x600@60Hz |
38 | */ | 38 | */ |
39 | static struct drm_display_mode std_mode[] = { | 39 | static struct drm_display_mode std_modes[] = { |
40 | { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840, | 40 | { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840, |
41 | 968, 1056, 0, 600, 601, 605, 628, 0, | 41 | 968, 1056, 0, 600, 601, 605, 628, 0, |
42 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 42 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
@@ -60,15 +60,18 @@ static struct drm_display_mode std_mode[] = { | |||
60 | * changes have occurred. | 60 | * changes have occurred. |
61 | * | 61 | * |
62 | * FIXME: take into account monitor limits | 62 | * FIXME: take into account monitor limits |
63 | * | ||
64 | * RETURNS: | ||
65 | * Number of modes found on @connector. | ||
63 | */ | 66 | */ |
64 | void drm_helper_probe_single_connector_modes(struct drm_connector *connector, | 67 | int drm_helper_probe_single_connector_modes(struct drm_connector *connector, |
65 | uint32_t maxX, uint32_t maxY) | 68 | uint32_t maxX, uint32_t maxY) |
66 | { | 69 | { |
67 | struct drm_device *dev = connector->dev; | 70 | struct drm_device *dev = connector->dev; |
68 | struct drm_display_mode *mode, *t; | 71 | struct drm_display_mode *mode, *t; |
69 | struct drm_connector_helper_funcs *connector_funcs = | 72 | struct drm_connector_helper_funcs *connector_funcs = |
70 | connector->helper_private; | 73 | connector->helper_private; |
71 | int ret; | 74 | int count = 0; |
72 | 75 | ||
73 | DRM_DEBUG("%s\n", drm_get_connector_name(connector)); | 76 | DRM_DEBUG("%s\n", drm_get_connector_name(connector)); |
74 | /* set all modes to the unverified state */ | 77 | /* set all modes to the unverified state */ |
@@ -81,14 +84,14 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
81 | DRM_DEBUG("%s is disconnected\n", | 84 | DRM_DEBUG("%s is disconnected\n", |
82 | drm_get_connector_name(connector)); | 85 | drm_get_connector_name(connector)); |
83 | /* TODO set EDID to NULL */ | 86 | /* TODO set EDID to NULL */ |
84 | return; | 87 | return 0; |
85 | } | 88 | } |
86 | 89 | ||
87 | ret = (*connector_funcs->get_modes)(connector); | 90 | count = (*connector_funcs->get_modes)(connector); |
91 | if (!count) | ||
92 | return 0; | ||
88 | 93 | ||
89 | if (ret) { | 94 | drm_mode_connector_list_update(connector); |
90 | drm_mode_connector_list_update(connector); | ||
91 | } | ||
92 | 95 | ||
93 | if (maxX && maxY) | 96 | if (maxX && maxY) |
94 | drm_mode_validate_size(dev, &connector->modes, maxX, | 97 | drm_mode_validate_size(dev, &connector->modes, maxX, |
@@ -102,25 +105,8 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
102 | 105 | ||
103 | drm_mode_prune_invalid(dev, &connector->modes, true); | 106 | drm_mode_prune_invalid(dev, &connector->modes, true); |
104 | 107 | ||
105 | if (list_empty(&connector->modes)) { | 108 | if (list_empty(&connector->modes)) |
106 | struct drm_display_mode *stdmode; | 109 | return 0; |
107 | |||
108 | DRM_DEBUG("No valid modes on %s\n", | ||
109 | drm_get_connector_name(connector)); | ||
110 | |||
111 | /* Should we do this here ??? | ||
112 | * When no valid EDID modes are available we end up | ||
113 | * here and bailed in the past, now we add a standard | ||
114 | * 640x480@60Hz mode and carry on. | ||
115 | */ | ||
116 | stdmode = drm_mode_duplicate(dev, &std_mode[0]); | ||
117 | drm_mode_probed_add(connector, stdmode); | ||
118 | drm_mode_list_concat(&connector->probed_modes, | ||
119 | &connector->modes); | ||
120 | |||
121 | DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n", | ||
122 | drm_get_connector_name(connector)); | ||
123 | } | ||
124 | 110 | ||
125 | drm_mode_sort(&connector->modes); | 111 | drm_mode_sort(&connector->modes); |
126 | 112 | ||
@@ -131,20 +117,58 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
131 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 117 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
132 | drm_mode_debug_printmodeline(mode); | 118 | drm_mode_debug_printmodeline(mode); |
133 | } | 119 | } |
120 | |||
121 | return count; | ||
134 | } | 122 | } |
135 | EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); | 123 | EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); |
136 | 124 | ||
137 | void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, | 125 | int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, |
138 | uint32_t maxY) | 126 | uint32_t maxY) |
139 | { | 127 | { |
140 | struct drm_connector *connector; | 128 | struct drm_connector *connector; |
129 | int count = 0; | ||
141 | 130 | ||
142 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 131 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
143 | drm_helper_probe_single_connector_modes(connector, maxX, maxY); | 132 | count += drm_helper_probe_single_connector_modes(connector, |
133 | maxX, maxY); | ||
144 | } | 134 | } |
135 | |||
136 | return count; | ||
145 | } | 137 | } |
146 | EXPORT_SYMBOL(drm_helper_probe_connector_modes); | 138 | EXPORT_SYMBOL(drm_helper_probe_connector_modes); |
147 | 139 | ||
140 | static void drm_helper_add_std_modes(struct drm_device *dev, | ||
141 | struct drm_connector *connector) | ||
142 | { | ||
143 | struct drm_display_mode *mode, *t; | ||
144 | int i; | ||
145 | |||
146 | for (i = 0; i < ARRAY_SIZE(std_modes); i++) { | ||
147 | struct drm_display_mode *stdmode; | ||
148 | |||
149 | /* | ||
150 | * When no valid EDID modes are available we end up | ||
151 | * here and bailed in the past, now we add some standard | ||
152 | * modes and move on. | ||
153 | */ | ||
154 | stdmode = drm_mode_duplicate(dev, &std_modes[i]); | ||
155 | drm_mode_probed_add(connector, stdmode); | ||
156 | drm_mode_list_concat(&connector->probed_modes, | ||
157 | &connector->modes); | ||
158 | |||
159 | DRM_DEBUG("Adding mode %s to %s\n", stdmode->name, | ||
160 | drm_get_connector_name(connector)); | ||
161 | } | ||
162 | drm_mode_sort(&connector->modes); | ||
163 | |||
164 | DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector)); | ||
165 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | ||
166 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
167 | |||
168 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
169 | drm_mode_debug_printmodeline(mode); | ||
170 | } | ||
171 | } | ||
148 | 172 | ||
149 | /** | 173 | /** |
150 | * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config | 174 | * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config |
@@ -237,6 +261,8 @@ static void drm_enable_connectors(struct drm_device *dev, bool *enabled) | |||
237 | 261 | ||
238 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 262 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
239 | enabled[i] = drm_connector_enabled(connector, true); | 263 | enabled[i] = drm_connector_enabled(connector, true); |
264 | DRM_DEBUG("connector %d enabled? %s\n", connector->base.id, | ||
265 | enabled[i] ? "yes" : "no"); | ||
240 | any_enabled |= enabled[i]; | 266 | any_enabled |= enabled[i]; |
241 | i++; | 267 | i++; |
242 | } | 268 | } |
@@ -265,11 +291,17 @@ static bool drm_target_preferred(struct drm_device *dev, | |||
265 | continue; | 291 | continue; |
266 | } | 292 | } |
267 | 293 | ||
294 | DRM_DEBUG("looking for preferred mode on connector %d\n", | ||
295 | connector->base.id); | ||
296 | |||
268 | modes[i] = drm_has_preferred_mode(connector, width, height); | 297 | modes[i] = drm_has_preferred_mode(connector, width, height); |
269 | if (!modes[i]) { | 298 | /* No preferred modes, pick one off the list */ |
299 | if (!modes[i] && !list_empty(&connector->modes)) { | ||
270 | list_for_each_entry(modes[i], &connector->modes, head) | 300 | list_for_each_entry(modes[i], &connector->modes, head) |
271 | break; | 301 | break; |
272 | } | 302 | } |
303 | DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name : | ||
304 | "none"); | ||
273 | i++; | 305 | i++; |
274 | } | 306 | } |
275 | return true; | 307 | return true; |
@@ -369,6 +401,8 @@ static void drm_setup_crtcs(struct drm_device *dev) | |||
369 | int width, height; | 401 | int width, height; |
370 | int i, ret; | 402 | int i, ret; |
371 | 403 | ||
404 | DRM_DEBUG("\n"); | ||
405 | |||
372 | width = dev->mode_config.max_width; | 406 | width = dev->mode_config.max_width; |
373 | height = dev->mode_config.max_height; | 407 | height = dev->mode_config.max_height; |
374 | 408 | ||
@@ -390,6 +424,8 @@ static void drm_setup_crtcs(struct drm_device *dev) | |||
390 | if (!ret) | 424 | if (!ret) |
391 | DRM_ERROR("Unable to find initial modes\n"); | 425 | DRM_ERROR("Unable to find initial modes\n"); |
392 | 426 | ||
427 | DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height); | ||
428 | |||
393 | drm_pick_crtcs(dev, crtcs, modes, 0, width, height); | 429 | drm_pick_crtcs(dev, crtcs, modes, 0, width, height); |
394 | 430 | ||
395 | i = 0; | 431 | i = 0; |
@@ -403,6 +439,8 @@ static void drm_setup_crtcs(struct drm_device *dev) | |||
403 | } | 439 | } |
404 | 440 | ||
405 | if (mode && crtc) { | 441 | if (mode && crtc) { |
442 | DRM_DEBUG("desired mode %s set on crtc %d\n", | ||
443 | mode->name, crtc->base.id); | ||
406 | crtc->desired_mode = mode; | 444 | crtc->desired_mode = mode; |
407 | connector->encoder->crtc = crtc; | 445 | connector->encoder->crtc = crtc; |
408 | } else | 446 | } else |
@@ -414,6 +452,59 @@ static void drm_setup_crtcs(struct drm_device *dev) | |||
414 | kfree(modes); | 452 | kfree(modes); |
415 | kfree(enabled); | 453 | kfree(enabled); |
416 | } | 454 | } |
455 | |||
456 | /** | ||
457 | * drm_encoder_crtc_ok - can a given crtc drive a given encoder? | ||
458 | * @encoder: encoder to test | ||
459 | * @crtc: crtc to test | ||
460 | * | ||
461 | * Return false if @encoder can't be driven by @crtc, true otherwise. | ||
462 | */ | ||
463 | static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, | ||
464 | struct drm_crtc *crtc) | ||
465 | { | ||
466 | struct drm_device *dev; | ||
467 | struct drm_crtc *tmp; | ||
468 | int crtc_mask = 1; | ||
469 | |||
470 | WARN(!crtc, "checking null crtc?"); | ||
471 | |||
472 | dev = crtc->dev; | ||
473 | |||
474 | list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { | ||
475 | if (tmp == crtc) | ||
476 | break; | ||
477 | crtc_mask <<= 1; | ||
478 | } | ||
479 | |||
480 | if (encoder->possible_crtcs & crtc_mask) | ||
481 | return true; | ||
482 | return false; | ||
483 | } | ||
484 | |||
485 | /* | ||
486 | * Check the CRTC we're going to map each output to vs. its current | ||
487 | * CRTC. If they don't match, we have to disable the output and the CRTC | ||
488 | * since the driver will have to re-route things. | ||
489 | */ | ||
490 | static void | ||
491 | drm_crtc_prepare_encoders(struct drm_device *dev) | ||
492 | { | ||
493 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
494 | struct drm_encoder *encoder; | ||
495 | |||
496 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
497 | encoder_funcs = encoder->helper_private; | ||
498 | /* Disable unused encoders */ | ||
499 | if (encoder->crtc == NULL) | ||
500 | (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); | ||
501 | /* Disable encoders whose CRTC is about to change */ | ||
502 | if (encoder_funcs->get_crtc && | ||
503 | encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) | ||
504 | (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); | ||
505 | } | ||
506 | } | ||
507 | |||
417 | /** | 508 | /** |
418 | * drm_crtc_set_mode - set a mode | 509 | * drm_crtc_set_mode - set a mode |
419 | * @crtc: CRTC to program | 510 | * @crtc: CRTC to program |
@@ -442,6 +533,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
442 | int saved_x, saved_y; | 533 | int saved_x, saved_y; |
443 | struct drm_encoder *encoder; | 534 | struct drm_encoder *encoder; |
444 | bool ret = true; | 535 | bool ret = true; |
536 | bool depth_changed, bpp_changed; | ||
445 | 537 | ||
446 | adjusted_mode = drm_mode_duplicate(dev, mode); | 538 | adjusted_mode = drm_mode_duplicate(dev, mode); |
447 | 539 | ||
@@ -450,6 +542,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
450 | if (!crtc->enabled) | 542 | if (!crtc->enabled) |
451 | return true; | 543 | return true; |
452 | 544 | ||
545 | if (old_fb && crtc->fb) { | ||
546 | depth_changed = (old_fb->depth != crtc->fb->depth); | ||
547 | bpp_changed = (old_fb->bits_per_pixel != | ||
548 | crtc->fb->bits_per_pixel); | ||
549 | } else { | ||
550 | depth_changed = true; | ||
551 | bpp_changed = true; | ||
552 | } | ||
553 | |||
453 | saved_mode = crtc->mode; | 554 | saved_mode = crtc->mode; |
454 | saved_x = crtc->x; | 555 | saved_x = crtc->x; |
455 | saved_y = crtc->y; | 556 | saved_y = crtc->y; |
@@ -462,9 +563,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
462 | crtc->y = y; | 563 | crtc->y = y; |
463 | 564 | ||
464 | if (drm_mode_equal(&saved_mode, &crtc->mode)) { | 565 | if (drm_mode_equal(&saved_mode, &crtc->mode)) { |
465 | if (saved_x != crtc->x || saved_y != crtc->y) { | 566 | if (saved_x != crtc->x || saved_y != crtc->y || |
466 | crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, | 567 | depth_changed || bpp_changed) { |
467 | old_fb); | 568 | ret = !crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, |
569 | old_fb); | ||
468 | goto done; | 570 | goto done; |
469 | } | 571 | } |
470 | } | 572 | } |
@@ -498,12 +600,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
498 | encoder_funcs->prepare(encoder); | 600 | encoder_funcs->prepare(encoder); |
499 | } | 601 | } |
500 | 602 | ||
603 | drm_crtc_prepare_encoders(dev); | ||
604 | |||
501 | crtc_funcs->prepare(crtc); | 605 | crtc_funcs->prepare(crtc); |
502 | 606 | ||
503 | /* Set up the DPLL and any encoders state that needs to adjust or depend | 607 | /* Set up the DPLL and any encoders state that needs to adjust or depend |
504 | * on the DPLL. | 608 | * on the DPLL. |
505 | */ | 609 | */ |
506 | crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); | 610 | ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); |
611 | if (!ret) | ||
612 | goto done; | ||
507 | 613 | ||
508 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 614 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
509 | 615 | ||
@@ -566,10 +672,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
566 | struct drm_device *dev; | 672 | struct drm_device *dev; |
567 | struct drm_crtc **save_crtcs, *new_crtc; | 673 | struct drm_crtc **save_crtcs, *new_crtc; |
568 | struct drm_encoder **save_encoders, *new_encoder; | 674 | struct drm_encoder **save_encoders, *new_encoder; |
569 | struct drm_framebuffer *old_fb; | 675 | struct drm_framebuffer *old_fb = NULL; |
570 | bool save_enabled; | 676 | bool save_enabled; |
571 | bool changed = false; | 677 | bool mode_changed = false; |
572 | bool flip_or_move = false; | 678 | bool fb_changed = false; |
573 | struct drm_connector *connector; | 679 | struct drm_connector *connector; |
574 | int count = 0, ro, fail = 0; | 680 | int count = 0, ro, fail = 0; |
575 | struct drm_crtc_helper_funcs *crtc_funcs; | 681 | struct drm_crtc_helper_funcs *crtc_funcs; |
@@ -597,7 +703,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
597 | /* save previous config */ | 703 | /* save previous config */ |
598 | save_enabled = set->crtc->enabled; | 704 | save_enabled = set->crtc->enabled; |
599 | 705 | ||
600 | /* this is meant to be num_connector not num_crtc */ | 706 | /* |
707 | * We do mode_config.num_connectors here since we'll look at the | ||
708 | * CRTC and encoder associated with each connector later. | ||
709 | */ | ||
601 | save_crtcs = kzalloc(dev->mode_config.num_connector * | 710 | save_crtcs = kzalloc(dev->mode_config.num_connector * |
602 | sizeof(struct drm_crtc *), GFP_KERNEL); | 711 | sizeof(struct drm_crtc *), GFP_KERNEL); |
603 | if (!save_crtcs) | 712 | if (!save_crtcs) |
@@ -613,21 +722,26 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
613 | /* We should be able to check here if the fb has the same properties | 722 | /* We should be able to check here if the fb has the same properties |
614 | * and then just flip_or_move it */ | 723 | * and then just flip_or_move it */ |
615 | if (set->crtc->fb != set->fb) { | 724 | if (set->crtc->fb != set->fb) { |
616 | /* if we have no fb then its a change not a flip */ | 725 | /* If we have no fb then treat it as a full mode set */ |
617 | if (set->crtc->fb == NULL) | 726 | if (set->crtc->fb == NULL) { |
618 | changed = true; | 727 | DRM_DEBUG("crtc has no fb, full mode set\n"); |
728 | mode_changed = true; | ||
729 | } else if ((set->fb->bits_per_pixel != | ||
730 | set->crtc->fb->bits_per_pixel) || | ||
731 | set->fb->depth != set->crtc->fb->depth) | ||
732 | fb_changed = true; | ||
619 | else | 733 | else |
620 | flip_or_move = true; | 734 | fb_changed = true; |
621 | } | 735 | } |
622 | 736 | ||
623 | if (set->x != set->crtc->x || set->y != set->crtc->y) | 737 | if (set->x != set->crtc->x || set->y != set->crtc->y) |
624 | flip_or_move = true; | 738 | fb_changed = true; |
625 | 739 | ||
626 | if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { | 740 | if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { |
627 | DRM_DEBUG("modes are different\n"); | 741 | DRM_DEBUG("modes are different, full mode set\n"); |
628 | drm_mode_debug_printmodeline(&set->crtc->mode); | 742 | drm_mode_debug_printmodeline(&set->crtc->mode); |
629 | drm_mode_debug_printmodeline(set->mode); | 743 | drm_mode_debug_printmodeline(set->mode); |
630 | changed = true; | 744 | mode_changed = true; |
631 | } | 745 | } |
632 | 746 | ||
633 | /* a) traverse passed in connector list and get encoders for them */ | 747 | /* a) traverse passed in connector list and get encoders for them */ |
@@ -650,7 +764,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
650 | } | 764 | } |
651 | 765 | ||
652 | if (new_encoder != connector->encoder) { | 766 | if (new_encoder != connector->encoder) { |
653 | changed = true; | 767 | DRM_DEBUG("encoder changed, full mode switch\n"); |
768 | mode_changed = true; | ||
654 | connector->encoder = new_encoder; | 769 | connector->encoder = new_encoder; |
655 | } | 770 | } |
656 | } | 771 | } |
@@ -676,17 +791,27 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
676 | if (set->connectors[ro] == connector) | 791 | if (set->connectors[ro] == connector) |
677 | new_crtc = set->crtc; | 792 | new_crtc = set->crtc; |
678 | } | 793 | } |
794 | |||
795 | /* Make sure the new CRTC will work with the encoder */ | ||
796 | if (new_crtc && | ||
797 | !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { | ||
798 | ret = -EINVAL; | ||
799 | goto fail_set_mode; | ||
800 | } | ||
679 | if (new_crtc != connector->encoder->crtc) { | 801 | if (new_crtc != connector->encoder->crtc) { |
680 | changed = true; | 802 | DRM_DEBUG("crtc changed, full mode switch\n"); |
803 | mode_changed = true; | ||
681 | connector->encoder->crtc = new_crtc; | 804 | connector->encoder->crtc = new_crtc; |
682 | } | 805 | } |
806 | DRM_DEBUG("setting connector %d crtc to %p\n", | ||
807 | connector->base.id, new_crtc); | ||
683 | } | 808 | } |
684 | 809 | ||
685 | /* mode_set_base is not a required function */ | 810 | /* mode_set_base is not a required function */ |
686 | if (flip_or_move && !crtc_funcs->mode_set_base) | 811 | if (fb_changed && !crtc_funcs->mode_set_base) |
687 | changed = true; | 812 | mode_changed = true; |
688 | 813 | ||
689 | if (changed) { | 814 | if (mode_changed) { |
690 | old_fb = set->crtc->fb; | 815 | old_fb = set->crtc->fb; |
691 | set->crtc->fb = set->fb; | 816 | set->crtc->fb = set->fb; |
692 | set->crtc->enabled = (set->mode != NULL); | 817 | set->crtc->enabled = (set->mode != NULL); |
@@ -696,6 +821,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
696 | if (!drm_crtc_helper_set_mode(set->crtc, set->mode, | 821 | if (!drm_crtc_helper_set_mode(set->crtc, set->mode, |
697 | set->x, set->y, | 822 | set->x, set->y, |
698 | old_fb)) { | 823 | old_fb)) { |
824 | DRM_ERROR("failed to set mode on crtc %p\n", | ||
825 | set->crtc); | ||
699 | ret = -EINVAL; | 826 | ret = -EINVAL; |
700 | goto fail_set_mode; | 827 | goto fail_set_mode; |
701 | } | 828 | } |
@@ -705,11 +832,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
705 | set->crtc->desired_mode = set->mode; | 832 | set->crtc->desired_mode = set->mode; |
706 | } | 833 | } |
707 | drm_helper_disable_unused_functions(dev); | 834 | drm_helper_disable_unused_functions(dev); |
708 | } else if (flip_or_move) { | 835 | } else if (fb_changed) { |
709 | old_fb = set->crtc->fb; | 836 | old_fb = set->crtc->fb; |
710 | if (set->crtc->fb != set->fb) | 837 | if (set->crtc->fb != set->fb) |
711 | set->crtc->fb = set->fb; | 838 | set->crtc->fb = set->fb; |
712 | crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb); | 839 | ret = crtc_funcs->mode_set_base(set->crtc, |
840 | set->x, set->y, old_fb); | ||
841 | if (ret != 0) | ||
842 | goto fail_set_mode; | ||
713 | } | 843 | } |
714 | 844 | ||
715 | kfree(save_encoders); | 845 | kfree(save_encoders); |
@@ -718,9 +848,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
718 | 848 | ||
719 | fail_set_mode: | 849 | fail_set_mode: |
720 | set->crtc->enabled = save_enabled; | 850 | set->crtc->enabled = save_enabled; |
851 | set->crtc->fb = old_fb; | ||
721 | count = 0; | 852 | count = 0; |
722 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | 853 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
854 | if (!connector->encoder) | ||
855 | continue; | ||
856 | |||
723 | connector->encoder->crtc = save_crtcs[count++]; | 857 | connector->encoder->crtc = save_crtcs[count++]; |
858 | } | ||
724 | fail_no_encoder: | 859 | fail_no_encoder: |
725 | kfree(save_crtcs); | 860 | kfree(save_crtcs); |
726 | count = 0; | 861 | count = 0; |
@@ -764,10 +899,31 @@ bool drm_helper_plugged_event(struct drm_device *dev) | |||
764 | */ | 899 | */ |
765 | bool drm_helper_initial_config(struct drm_device *dev, bool can_grow) | 900 | bool drm_helper_initial_config(struct drm_device *dev, bool can_grow) |
766 | { | 901 | { |
767 | int ret = false; | 902 | struct drm_connector *connector; |
903 | int count = 0; | ||
768 | 904 | ||
769 | drm_helper_plugged_event(dev); | 905 | count = drm_helper_probe_connector_modes(dev, |
770 | return ret; | 906 | dev->mode_config.max_width, |
907 | dev->mode_config.max_height); | ||
908 | |||
909 | /* | ||
910 | * None of the available connectors had any modes, so add some | ||
911 | * and try to light them up anyway | ||
912 | */ | ||
913 | if (!count) { | ||
914 | DRM_ERROR("connectors have no modes, using standard modes\n"); | ||
915 | list_for_each_entry(connector, | ||
916 | &dev->mode_config.connector_list, | ||
917 | head) | ||
918 | drm_helper_add_std_modes(dev, connector); | ||
919 | } | ||
920 | |||
921 | drm_setup_crtcs(dev); | ||
922 | |||
923 | /* alert the driver fb layer */ | ||
924 | dev->mode_config.funcs->fb_changed(dev); | ||
925 | |||
926 | return 0; | ||
771 | } | 927 | } |
772 | EXPORT_SYMBOL(drm_helper_initial_config); | 928 | EXPORT_SYMBOL(drm_helper_initial_config); |
773 | 929 | ||
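The set_config rework above replaces the old changed/flip_or_move pair with mode_changed/fb_changed, and for the framebuffer side the distinction reduces to whether the CRTC timings or the framebuffer format change. A hypothetical helper summarizing that decision, using the same struct fields as the hunks above; it is not part of the commit and ignores the encoder/CRTC re-routing cases that also force a full mode set:

static bool example_needs_full_modeset(struct drm_mode_set *set)
{
        struct drm_crtc *crtc = set->crtc;

        if (crtc->fb == NULL)
                return true;            /* nothing bound yet, do a full mode set */
        if (set->mode && !drm_mode_equal(set->mode, &crtc->mode))
                return true;            /* timings differ */
        if (set->fb->depth != crtc->fb->depth ||
            set->fb->bits_per_pixel != crtc->fb->bits_per_pixel)
                return true;            /* format change, mode_set_base() is not enough */
        return false;                   /* a flip or pan via mode_set_base() will do */
}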
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 5ff88d952226..14c7a23dc157 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -294,6 +294,7 @@ EXPORT_SYMBOL(drm_init); | |||
294 | */ | 294 | */ |
295 | static void drm_cleanup(struct drm_device * dev) | 295 | static void drm_cleanup(struct drm_device * dev) |
296 | { | 296 | { |
297 | struct drm_map_list *r_list, *list_temp; | ||
297 | DRM_DEBUG("\n"); | 298 | DRM_DEBUG("\n"); |
298 | 299 | ||
299 | if (!dev) { | 300 | if (!dev) { |
@@ -325,6 +326,9 @@ static void drm_cleanup(struct drm_device * dev) | |||
325 | drm_ht_remove(&dev->map_hash); | 326 | drm_ht_remove(&dev->map_hash); |
326 | drm_ctxbitmap_cleanup(dev); | 327 | drm_ctxbitmap_cleanup(dev); |
327 | 328 | ||
329 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) | ||
330 | drm_rmmap(dev, r_list->map); | ||
331 | |||
328 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 332 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
329 | drm_put_minor(&dev->control); | 333 | drm_put_minor(&dev->control); |
330 | 334 | ||
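The new cleanup loop has to use the _safe list iterator because drm_rmmap() unlinks and frees the drm_map_list entry it is handed; the plain iterator would chase a freed next pointer. The same two lines, annotated (comments only, no behavioural change):

struct drm_map_list *r_list, *list_temp;

list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
        drm_rmmap(dev, r_list->map);    /* frees r_list; list_temp already holds the next entry */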
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0fbb0da342cb..a839a28d8ee6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,7 +125,7 @@ static bool edid_is_valid(struct edid *edid) | |||
125 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | 125 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); |
126 | goto bad; | 126 | goto bad; |
127 | } | 127 | } |
128 | if (edid->revision <= 0 || edid->revision > 3) { | 128 | if (edid->revision > 3) { |
129 | DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); | 129 | DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); |
130 | goto bad; | 130 | goto bad; |
131 | } | 131 | } |
@@ -320,10 +320,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
320 | mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); | 320 | mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); |
321 | 321 | ||
322 | mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; | 322 | mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; |
323 | mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) | | 323 | mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 4) | |
324 | pt->vsync_offset_lo); | 324 | pt->vsync_offset_lo); |
325 | mode->vsync_end = mode->vsync_start + | 325 | mode->vsync_end = mode->vsync_start + |
326 | ((pt->vsync_pulse_width_hi << 8) | | 326 | ((pt->vsync_pulse_width_hi << 4) | |
327 | pt->vsync_pulse_width_lo); | 327 | pt->vsync_pulse_width_lo); |
328 | mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); | 328 | mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); |
329 | 329 | ||
@@ -660,7 +660,7 @@ struct edid *drm_get_edid(struct drm_connector *connector, | |||
660 | 660 | ||
661 | edid = (struct edid *)drm_ddc_read(adapter); | 661 | edid = (struct edid *)drm_ddc_read(adapter); |
662 | if (!edid) { | 662 | if (!edid) { |
663 | dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n", | 663 | dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n", |
664 | drm_get_connector_name(connector)); | 664 | drm_get_connector_name(connector)); |
665 | return NULL; | 665 | return NULL; |
666 | } | 666 | } |
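The vsync shift fix above follows from how a detailed timing descriptor packs its fields: the horizontal sync offset and pulse width keep 8 low bits plus 2 high bits (so the high bits shift left by 8), while the vertical ones keep only 4 low bits plus 2 high bits (so the high bits shift left by 4). A standalone sketch of the reassembly, with illustrative parameter names:

#include <stdint.h>

static unsigned int edid_hsync_field(uint8_t lo8, uint8_t hi2)
{
        return ((unsigned int)hi2 << 8) | lo8;  /* 10-bit value: 8 low bits + 2 high bits */
}

static unsigned int edid_vsync_field(uint8_t lo4, uint8_t hi2)
{
        return ((unsigned int)hi2 << 4) | lo4;  /* 6-bit value: 4 low bits + 2 high bits */
}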
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b06a53715853..f52663ebe016 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -457,6 +457,9 @@ int drm_release(struct inode *inode, struct file *filp) | |||
457 | if (dev->driver->driver_features & DRIVER_GEM) | 457 | if (dev->driver->driver_features & DRIVER_GEM) |
458 | drm_gem_release(dev, file_priv); | 458 | drm_gem_release(dev, file_priv); |
459 | 459 | ||
460 | if (dev->driver->driver_features & DRIVER_MODESET) | ||
461 | drm_fb_release(file_priv); | ||
462 | |||
460 | mutex_lock(&dev->ctxlist_mutex); | 463 | mutex_lock(&dev->ctxlist_mutex); |
461 | if (!list_empty(&dev->ctxlist)) { | 464 | if (!list_empty(&dev->ctxlist)) { |
462 | struct drm_ctx_list *pos, *n; | 465 | struct drm_ctx_list *pos, *n; |
@@ -481,6 +484,7 @@ int drm_release(struct inode *inode, struct file *filp) | |||
481 | mutex_lock(&dev->struct_mutex); | 484 | mutex_lock(&dev->struct_mutex); |
482 | 485 | ||
483 | if (file_priv->is_master) { | 486 | if (file_priv->is_master) { |
487 | struct drm_master *master = file_priv->master; | ||
484 | struct drm_file *temp; | 488 | struct drm_file *temp; |
485 | list_for_each_entry(temp, &dev->filelist, lhead) { | 489 | list_for_each_entry(temp, &dev->filelist, lhead) { |
486 | if ((temp->master == file_priv->master) && | 490 | if ((temp->master == file_priv->master) && |
@@ -488,6 +492,19 @@ int drm_release(struct inode *inode, struct file *filp) | |||
488 | temp->authenticated = 0; | 492 | temp->authenticated = 0; |
489 | } | 493 | } |
490 | 494 | ||
495 | /** | ||
496 | * Since the master is disappearing, so is the | ||
497 | * possibility to lock. | ||
498 | */ | ||
499 | |||
500 | if (master->lock.hw_lock) { | ||
501 | if (dev->sigdata.lock == master->lock.hw_lock) | ||
502 | dev->sigdata.lock = NULL; | ||
503 | master->lock.hw_lock = NULL; | ||
504 | master->lock.file_priv = NULL; | ||
505 | wake_up_interruptible_all(&master->lock.lock_queue); | ||
506 | } | ||
507 | |||
491 | if (file_priv->minor->master == file_priv->master) { | 508 | if (file_priv->minor->master == file_priv->master) { |
492 | /* drop the reference held my the minor */ | 509 | /* drop the reference held my the minor */ |
493 | drm_master_put(&file_priv->minor->master); | 510 | drm_master_put(&file_priv->minor->master); |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9da581452874..88d3368ffddd 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -104,8 +104,8 @@ drm_gem_init(struct drm_device *dev) | |||
104 | 104 | ||
105 | if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, | 105 | if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, |
106 | DRM_FILE_PAGE_OFFSET_SIZE)) { | 106 | DRM_FILE_PAGE_OFFSET_SIZE)) { |
107 | drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); | ||
108 | drm_ht_remove(&mm->offset_hash); | 107 | drm_ht_remove(&mm->offset_hash); |
108 | drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); | ||
109 | return -ENOMEM; | 109 | return -ENOMEM; |
110 | } | 110 | } |
111 | 111 | ||
@@ -136,7 +136,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) | |||
136 | obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); | 136 | obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); |
137 | 137 | ||
138 | obj->dev = dev; | 138 | obj->dev = dev; |
139 | obj->filp = shmem_file_setup("drm mm object", size, 0); | 139 | obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
140 | if (IS_ERR(obj->filp)) { | 140 | if (IS_ERR(obj->filp)) { |
141 | kfree(obj); | 141 | kfree(obj); |
142 | return NULL; | 142 | return NULL; |
@@ -295,35 +295,37 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, | |||
295 | return -EBADF; | 295 | return -EBADF; |
296 | 296 | ||
297 | again: | 297 | again: |
298 | if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) | 298 | if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) { |
299 | return -ENOMEM; | 299 | ret = -ENOMEM; |
300 | goto err; | ||
301 | } | ||
300 | 302 | ||
301 | spin_lock(&dev->object_name_lock); | 303 | spin_lock(&dev->object_name_lock); |
302 | if (obj->name) { | 304 | if (!obj->name) { |
303 | args->name = obj->name; | 305 | ret = idr_get_new_above(&dev->object_name_idr, obj, 1, |
306 | &obj->name); | ||
307 | args->name = (uint64_t) obj->name; | ||
304 | spin_unlock(&dev->object_name_lock); | 308 | spin_unlock(&dev->object_name_lock); |
305 | return 0; | ||
306 | } | ||
307 | ret = idr_get_new_above(&dev->object_name_idr, obj, 1, | ||
308 | &obj->name); | ||
309 | spin_unlock(&dev->object_name_lock); | ||
310 | if (ret == -EAGAIN) | ||
311 | goto again; | ||
312 | 309 | ||
313 | if (ret != 0) { | 310 | if (ret == -EAGAIN) |
314 | mutex_lock(&dev->struct_mutex); | 311 | goto again; |
315 | drm_gem_object_unreference(obj); | ||
316 | mutex_unlock(&dev->struct_mutex); | ||
317 | return ret; | ||
318 | } | ||
319 | 312 | ||
320 | /* | 313 | if (ret != 0) |
321 | * Leave the reference from the lookup around as the | 314 | goto err; |
322 | * name table now holds one | ||
323 | */ | ||
324 | args->name = (uint64_t) obj->name; | ||
325 | 315 | ||
326 | return 0; | 316 | /* Allocate a reference for the name table. */ |
317 | drm_gem_object_reference(obj); | ||
318 | } else { | ||
319 | args->name = (uint64_t) obj->name; | ||
320 | spin_unlock(&dev->object_name_lock); | ||
321 | ret = 0; | ||
322 | } | ||
323 | |||
324 | err: | ||
325 | mutex_lock(&dev->struct_mutex); | ||
326 | drm_gem_object_unreference(obj); | ||
327 | mutex_unlock(&dev->struct_mutex); | ||
328 | return ret; | ||
327 | } | 329 | } |
328 | 330 | ||
329 | /** | 331 | /** |
@@ -448,6 +450,7 @@ drm_gem_object_handle_free(struct kref *kref) | |||
448 | spin_lock(&dev->object_name_lock); | 450 | spin_lock(&dev->object_name_lock); |
449 | if (obj->name) { | 451 | if (obj->name) { |
450 | idr_remove(&dev->object_name_idr, obj->name); | 452 | idr_remove(&dev->object_name_idr, obj->name); |
453 | obj->name = 0; | ||
451 | spin_unlock(&dev->object_name_lock); | 454 | spin_unlock(&dev->object_name_lock); |
452 | /* | 455 | /* |
453 | * The object name held a reference to this object, drop | 456 | * The object name held a reference to this object, drop |
@@ -460,6 +463,26 @@ drm_gem_object_handle_free(struct kref *kref) | |||
460 | } | 463 | } |
461 | EXPORT_SYMBOL(drm_gem_object_handle_free); | 464 | EXPORT_SYMBOL(drm_gem_object_handle_free); |
462 | 465 | ||
466 | void drm_gem_vm_open(struct vm_area_struct *vma) | ||
467 | { | ||
468 | struct drm_gem_object *obj = vma->vm_private_data; | ||
469 | |||
470 | drm_gem_object_reference(obj); | ||
471 | } | ||
472 | EXPORT_SYMBOL(drm_gem_vm_open); | ||
473 | |||
474 | void drm_gem_vm_close(struct vm_area_struct *vma) | ||
475 | { | ||
476 | struct drm_gem_object *obj = vma->vm_private_data; | ||
477 | struct drm_device *dev = obj->dev; | ||
478 | |||
479 | mutex_lock(&dev->struct_mutex); | ||
480 | drm_gem_object_unreference(obj); | ||
481 | mutex_unlock(&dev->struct_mutex); | ||
482 | } | ||
483 | EXPORT_SYMBOL(drm_gem_vm_close); | ||
484 | |||
485 | |||
463 | /** | 486 | /** |
464 | * drm_gem_mmap - memory map routine for GEM objects | 487 | * drm_gem_mmap - memory map routine for GEM objects |
465 | * @filp: DRM file pointer | 488 | * @filp: DRM file pointer |
@@ -521,6 +544,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
521 | #endif | 544 | #endif |
522 | vma->vm_page_prot = __pgprot(prot); | 545 | vma->vm_page_prot = __pgprot(prot); |
523 | 546 | ||
547 | /* Take a ref for this mapping of the object, so that the fault | ||
548 | * handler can dereference the mmap offset's pointer to the object. | ||
549 | * This reference is cleaned up by the corresponding vm_close | ||
550 | * (which should happen whether the vma was created by this call, or | ||
551 | * by a vm_open due to mremap or partial unmap or whatever). | ||
552 | */ | ||
553 | drm_gem_object_reference(obj); | ||
554 | |||
524 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | 555 | vma->vm_file = filp; /* Needed for drm_vm_open() */ |
525 | drm_vm_open_locked(vma); | 556 | drm_vm_open_locked(vma); |
526 | 557 | ||
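drm_gem_vm_open()/drm_gem_vm_close() exist so that every vma created for a GEM mapping, including ones produced later by mremap or partial unmap, holds its own reference on the object, matching the reference taken in drm_gem_mmap(). A sketch of how a driver would typically wire them up; example_gem_fault is a hypothetical driver fault handler, not part of this commit:

static int example_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static struct vm_operations_struct example_gem_vm_ops = {
        .fault = example_gem_fault,     /* driver-specific: fill in the faulting page */
        .open  = drm_gem_vm_open,       /* new vma: take another object reference */
        .close = drm_gem_vm_close,      /* vma destroyed: drop that reference */
};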
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 724e505873cf..93e677a481f5 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,8 +106,6 @@ void drm_vblank_cleanup(struct drm_device *dev) | |||
106 | 106 | ||
107 | drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, | 107 | drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, |
108 | DRM_MEM_DRIVER); | 108 | DRM_MEM_DRIVER); |
109 | drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, | ||
110 | DRM_MEM_DRIVER); | ||
111 | drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * | 109 | drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * |
112 | dev->num_crtcs, DRM_MEM_DRIVER); | 110 | dev->num_crtcs, DRM_MEM_DRIVER); |
113 | drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * | 111 | drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * |
@@ -132,7 +130,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
132 | setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, | 130 | setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, |
133 | (unsigned long)dev); | 131 | (unsigned long)dev); |
134 | spin_lock_init(&dev->vbl_lock); | 132 | spin_lock_init(&dev->vbl_lock); |
135 | atomic_set(&dev->vbl_signal_pending, 0); | ||
136 | dev->num_crtcs = num_crtcs; | 133 | dev->num_crtcs = num_crtcs; |
137 | 134 | ||
138 | dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, | 135 | dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, |
@@ -140,11 +137,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
140 | if (!dev->vbl_queue) | 137 | if (!dev->vbl_queue) |
141 | goto err; | 138 | goto err; |
142 | 139 | ||
143 | dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, | ||
144 | DRM_MEM_DRIVER); | ||
145 | if (!dev->vbl_sigs) | ||
146 | goto err; | ||
147 | |||
148 | dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, | 140 | dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, |
149 | DRM_MEM_DRIVER); | 141 | DRM_MEM_DRIVER); |
150 | if (!dev->_vblank_count) | 142 | if (!dev->_vblank_count) |
@@ -177,7 +169,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
177 | /* Zero per-crtc vblank stuff */ | 169 | /* Zero per-crtc vblank stuff */ |
178 | for (i = 0; i < num_crtcs; i++) { | 170 | for (i = 0; i < num_crtcs; i++) { |
179 | init_waitqueue_head(&dev->vbl_queue[i]); | 171 | init_waitqueue_head(&dev->vbl_queue[i]); |
180 | INIT_LIST_HEAD(&dev->vbl_sigs[i]); | ||
181 | atomic_set(&dev->_vblank_count[i], 0); | 172 | atomic_set(&dev->_vblank_count[i], 0); |
182 | atomic_set(&dev->vblank_refcount[i], 0); | 173 | atomic_set(&dev->vblank_refcount[i], 0); |
183 | } | 174 | } |
@@ -267,7 +258,8 @@ EXPORT_SYMBOL(drm_irq_install); | |||
267 | */ | 258 | */ |
268 | int drm_irq_uninstall(struct drm_device * dev) | 259 | int drm_irq_uninstall(struct drm_device * dev) |
269 | { | 260 | { |
270 | int irq_enabled; | 261 | unsigned long irqflags; |
262 | int irq_enabled, i; | ||
271 | 263 | ||
272 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | 264 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) |
273 | return -EINVAL; | 265 | return -EINVAL; |
@@ -277,6 +269,17 @@ int drm_irq_uninstall(struct drm_device * dev) | |||
277 | dev->irq_enabled = 0; | 269 | dev->irq_enabled = 0; |
278 | mutex_unlock(&dev->struct_mutex); | 270 | mutex_unlock(&dev->struct_mutex); |
279 | 271 | ||
272 | /* | ||
273 | * Wake up any waiters so they don't hang. | ||
274 | */ | ||
275 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
276 | for (i = 0; i < dev->num_crtcs; i++) { | ||
277 | DRM_WAKEUP(&dev->vbl_queue[i]); | ||
278 | dev->vblank_enabled[i] = 0; | ||
279 | dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); | ||
280 | } | ||
281 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
282 | |||
280 | if (!irq_enabled) | 283 | if (!irq_enabled) |
281 | return -EINVAL; | 284 | return -EINVAL; |
282 | 285 | ||
@@ -432,6 +435,8 @@ EXPORT_SYMBOL(drm_vblank_get); | |||
432 | */ | 435 | */ |
433 | void drm_vblank_put(struct drm_device *dev, int crtc) | 436 | void drm_vblank_put(struct drm_device *dev, int crtc) |
434 | { | 437 | { |
438 | BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0); | ||
439 | |||
435 | /* Last user schedules interrupt disable */ | 440 | /* Last user schedules interrupt disable */ |
436 | if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) | 441 | if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) |
437 | mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); | 442 | mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); |
@@ -457,8 +462,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) | |||
457 | * so that interrupts remain enabled in the interim. | 462 | * so that interrupts remain enabled in the interim. |
458 | */ | 463 | */ |
459 | if (!dev->vblank_inmodeset[crtc]) { | 464 | if (!dev->vblank_inmodeset[crtc]) { |
460 | dev->vblank_inmodeset[crtc] = 1; | 465 | dev->vblank_inmodeset[crtc] = 0x1; |
461 | drm_vblank_get(dev, crtc); | 466 | if (drm_vblank_get(dev, crtc) == 0) |
467 | dev->vblank_inmodeset[crtc] |= 0x2; | ||
462 | } | 468 | } |
463 | } | 469 | } |
464 | EXPORT_SYMBOL(drm_vblank_pre_modeset); | 470 | EXPORT_SYMBOL(drm_vblank_pre_modeset); |
@@ -470,9 +476,12 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) | |||
470 | if (dev->vblank_inmodeset[crtc]) { | 476 | if (dev->vblank_inmodeset[crtc]) { |
471 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 477 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
472 | dev->vblank_disable_allowed = 1; | 478 | dev->vblank_disable_allowed = 1; |
473 | dev->vblank_inmodeset[crtc] = 0; | ||
474 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 479 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
475 | drm_vblank_put(dev, crtc); | 480 | |
481 | if (dev->vblank_inmodeset[crtc] & 0x2) | ||
482 | drm_vblank_put(dev, crtc); | ||
483 | |||
484 | dev->vblank_inmodeset[crtc] = 0; | ||
476 | } | 485 | } |
477 | } | 486 | } |
478 | EXPORT_SYMBOL(drm_vblank_post_modeset); | 487 | EXPORT_SYMBOL(drm_vblank_post_modeset); |
@@ -529,15 +538,10 @@ out: | |||
529 | * \param data user argument, pointing to a drm_wait_vblank structure. | 538 | * \param data user argument, pointing to a drm_wait_vblank structure. |
530 | * \return zero on success or a negative number on failure. | 539 | * \return zero on success or a negative number on failure. |
531 | * | 540 | * |
532 | * Verifies the IRQ is installed. | 541 | * This function enables the vblank interrupt on the pipe requested, then |
533 | * | 542 | * sleeps waiting for the requested sequence number to occur, and drops |
534 | * If a signal is requested checks if this task has already scheduled the same signal | 543 | * the vblank interrupt refcount afterwards. (vblank irq disable follows that |
535 | * for the same vblank sequence number - nothing to be done in | 544 | * after a timeout with no further vblank waits scheduled). |
536 | * that case. If the number of tasks waiting for the interrupt exceeds 100 the | ||
537 | * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this | ||
538 | * task. | ||
539 | * | ||
540 | * If a signal is not requested, then calls vblank_wait(). | ||
541 | */ | 545 | */ |
542 | int drm_wait_vblank(struct drm_device *dev, void *data, | 546 | int drm_wait_vblank(struct drm_device *dev, void *data, |
543 | struct drm_file *file_priv) | 547 | struct drm_file *file_priv) |
@@ -549,6 +553,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
549 | if ((!dev->pdev->irq) || (!dev->irq_enabled)) | 553 | if ((!dev->pdev->irq) || (!dev->irq_enabled)) |
550 | return -EINVAL; | 554 | return -EINVAL; |
551 | 555 | ||
556 | if (vblwait->request.type & _DRM_VBLANK_SIGNAL) | ||
557 | return -EINVAL; | ||
558 | |||
552 | if (vblwait->request.type & | 559 | if (vblwait->request.type & |
553 | ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { | 560 | ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { |
554 | DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", | 561 | DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", |
@@ -586,88 +593,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
586 | vblwait->request.sequence = seq + 1; | 593 | vblwait->request.sequence = seq + 1; |
587 | } | 594 | } |
588 | 595 | ||
589 | if (flags & _DRM_VBLANK_SIGNAL) { | 596 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", |
590 | unsigned long irqflags; | 597 | vblwait->request.sequence, crtc); |
591 | struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; | 598 | dev->last_vblank_wait[crtc] = vblwait->request.sequence; |
592 | struct drm_vbl_sig *vbl_sig; | 599 | DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, |
593 | 600 | (((drm_vblank_count(dev, crtc) - | |
594 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 601 | vblwait->request.sequence) <= (1 << 23)) || |
595 | 602 | !dev->irq_enabled)); | |
596 | /* Check if this task has already scheduled the same signal | ||
597 | * for the same vblank sequence number; nothing to be done in | ||
598 | * that case | ||
599 | */ | ||
600 | list_for_each_entry(vbl_sig, vbl_sigs, head) { | ||
601 | if (vbl_sig->sequence == vblwait->request.sequence | ||
602 | && vbl_sig->info.si_signo == | ||
603 | vblwait->request.signal | ||
604 | && vbl_sig->task == current) { | ||
605 | spin_unlock_irqrestore(&dev->vbl_lock, | ||
606 | irqflags); | ||
607 | vblwait->reply.sequence = seq; | ||
608 | goto done; | ||
609 | } | ||
610 | } | ||
611 | |||
612 | if (atomic_read(&dev->vbl_signal_pending) >= 100) { | ||
613 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
614 | ret = -EBUSY; | ||
615 | goto done; | ||
616 | } | ||
617 | |||
618 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
619 | |||
620 | vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), | ||
621 | DRM_MEM_DRIVER); | ||
622 | if (!vbl_sig) { | ||
623 | ret = -ENOMEM; | ||
624 | goto done; | ||
625 | } | ||
626 | |||
627 | /* Get a refcount on the vblank, which will be released by | ||
628 | * drm_vbl_send_signals(). | ||
629 | */ | ||
630 | ret = drm_vblank_get(dev, crtc); | ||
631 | if (ret) { | ||
632 | drm_free(vbl_sig, sizeof(struct drm_vbl_sig), | ||
633 | DRM_MEM_DRIVER); | ||
634 | goto done; | ||
635 | } | ||
636 | |||
637 | atomic_inc(&dev->vbl_signal_pending); | ||
638 | |||
639 | vbl_sig->sequence = vblwait->request.sequence; | ||
640 | vbl_sig->info.si_signo = vblwait->request.signal; | ||
641 | vbl_sig->task = current; | ||
642 | 603 | ||
643 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 604 | if (ret != -EINTR) { |
644 | 605 | struct timeval now; | |
645 | list_add_tail(&vbl_sig->head, vbl_sigs); | ||
646 | 606 | ||
647 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 607 | do_gettimeofday(&now); |
648 | 608 | ||
649 | vblwait->reply.sequence = seq; | 609 | vblwait->reply.tval_sec = now.tv_sec; |
610 | vblwait->reply.tval_usec = now.tv_usec; | ||
611 | vblwait->reply.sequence = drm_vblank_count(dev, crtc); | ||
612 | DRM_DEBUG("returning %d to client\n", | ||
613 | vblwait->reply.sequence); | ||
650 | } else { | 614 | } else { |
651 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", | 615 | DRM_DEBUG("vblank wait interrupted by signal\n"); |
652 | vblwait->request.sequence, crtc); | ||
653 | dev->last_vblank_wait[crtc] = vblwait->request.sequence; | ||
654 | DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, | ||
655 | ((drm_vblank_count(dev, crtc) | ||
656 | - vblwait->request.sequence) <= (1 << 23))); | ||
657 | |||
658 | if (ret != -EINTR) { | ||
659 | struct timeval now; | ||
660 | |||
661 | do_gettimeofday(&now); | ||
662 | |||
663 | vblwait->reply.tval_sec = now.tv_sec; | ||
664 | vblwait->reply.tval_usec = now.tv_usec; | ||
665 | vblwait->reply.sequence = drm_vblank_count(dev, crtc); | ||
666 | DRM_DEBUG("returning %d to client\n", | ||
667 | vblwait->reply.sequence); | ||
668 | } else { | ||
669 | DRM_DEBUG("vblank wait interrupted by signal\n"); | ||
670 | } | ||
671 | } | 616 | } |
672 | 617 | ||
673 | done: | 618 | done: |
@@ -676,46 +621,6 @@ done: | |||
676 | } | 621 | } |
677 | 622 | ||
678 | /** | 623 | /** |
679 | * Send the VBLANK signals. | ||
680 | * | ||
681 | * \param dev DRM device. | ||
682 | * \param crtc CRTC where the vblank event occurred | ||
683 | * | ||
684 | * Sends a signal for each task in drm_device::vbl_sigs and empties the list. | ||
685 | * | ||
686 | * If a signal is not requested, then calls vblank_wait(). | ||
687 | */ | ||
688 | static void drm_vbl_send_signals(struct drm_device *dev, int crtc) | ||
689 | { | ||
690 | struct drm_vbl_sig *vbl_sig, *tmp; | ||
691 | struct list_head *vbl_sigs; | ||
692 | unsigned int vbl_seq; | ||
693 | unsigned long flags; | ||
694 | |||
695 | spin_lock_irqsave(&dev->vbl_lock, flags); | ||
696 | |||
697 | vbl_sigs = &dev->vbl_sigs[crtc]; | ||
698 | vbl_seq = drm_vblank_count(dev, crtc); | ||
699 | |||
700 | list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { | ||
701 | if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { | ||
702 | vbl_sig->info.si_code = vbl_seq; | ||
703 | send_sig_info(vbl_sig->info.si_signo, | ||
704 | &vbl_sig->info, vbl_sig->task); | ||
705 | |||
706 | list_del(&vbl_sig->head); | ||
707 | |||
708 | drm_free(vbl_sig, sizeof(*vbl_sig), | ||
709 | DRM_MEM_DRIVER); | ||
710 | atomic_dec(&dev->vbl_signal_pending); | ||
711 | drm_vblank_put(dev, crtc); | ||
712 | } | ||
713 | } | ||
714 | |||
715 | spin_unlock_irqrestore(&dev->vbl_lock, flags); | ||
716 | } | ||
717 | |||
718 | /** | ||
719 | * drm_handle_vblank - handle a vblank event | 624 | * drm_handle_vblank - handle a vblank event |
720 | * @dev: DRM device | 625 | * @dev: DRM device |
721 | * @crtc: where this event occurred | 626 | * @crtc: where this event occurred |
@@ -727,6 +632,5 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) | |||
727 | { | 632 | { |
728 | atomic_inc(&dev->_vblank_count[crtc]); | 633 | atomic_inc(&dev->_vblank_count[crtc]); |
729 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 634 | DRM_WAKEUP(&dev->vbl_queue[crtc]); |
730 | drm_vbl_send_signals(dev, crtc); | ||
731 | } | 635 | } |
732 | EXPORT_SYMBOL(drm_handle_vblank); | 636 | EXPORT_SYMBOL(drm_handle_vblank); |
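With the signal-based delivery path removed above, the only way left for a client to observe a vblank is to block in the wait ioctl itself. A minimal, hedged sketch of the userspace side via libdrm (header names and flags as in a typical libdrm install, not taken from this patch):

/* Hedged sketch: block for one vblank on the primary pipe using libdrm. */
#include <string.h>
#include <xf86drm.h>

static int wait_one_vblank(int fd)
{
        drmVBlank vbl;

        memset(&vbl, 0, sizeof(vbl));
        vbl.request.type = DRM_VBLANK_RELATIVE;  /* relative to the current count */
        vbl.request.sequence = 1;                /* one more vblank from now */

        return drmWaitVBlank(fd, &vbl);          /* blocks in the ioctl shown above */
}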
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 46e7b28f0707..e2f70a516c34 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c | |||
@@ -80,6 +80,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
80 | __set_current_state(TASK_INTERRUPTIBLE); | 80 | __set_current_state(TASK_INTERRUPTIBLE); |
81 | if (!master->lock.hw_lock) { | 81 | if (!master->lock.hw_lock) { |
82 | /* Device has been unregistered */ | 82 | /* Device has been unregistered */ |
83 | send_sig(SIGTERM, current, 0); | ||
83 | ret = -EINTR; | 84 | ret = -EINTR; |
84 | break; | 85 | break; |
85 | } | 86 | } |
@@ -93,7 +94,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
93 | /* Contention */ | 94 | /* Contention */ |
94 | schedule(); | 95 | schedule(); |
95 | if (signal_pending(current)) { | 96 | if (signal_pending(current)) { |
96 | ret = -ERESTARTSYS; | 97 | ret = -EINTR; |
97 | break; | 98 | break; |
98 | } | 99 | } |
99 | } | 100 | } |
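Returning -EINTR instead of -ERESTARTSYS means an interrupted lock ioctl is reported to the caller rather than silently restarted, so userspace is expected to retry. A hedged sketch of that retry loop (include paths assumed from a typical libdrm/kernel-header install):

/* Hedged sketch: retry DRM_IOCTL_LOCK when a signal interrupts the wait. */
#include <errno.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int take_hw_lock(int fd, unsigned int context)
{
        struct drm_lock lock = { .context = context, .flags = 0 };
        int ret;

        do {
                ret = ioctl(fd, DRM_IOCTL_LOCK, &lock);
        } while (ret == -1 && errno == EINTR);   /* the -EINTR above surfaces here */

        return ret;
}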
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 803bc9e7ce3c..bcc869bc4092 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c | |||
@@ -171,9 +171,14 @@ EXPORT_SYMBOL(drm_core_ioremap); | |||
171 | 171 | ||
172 | void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) | 172 | void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) |
173 | { | 173 | { |
174 | map->handle = ioremap_wc(map->offset, map->size); | 174 | if (drm_core_has_AGP(dev) && |
175 | dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) | ||
176 | map->handle = agp_remap(map->offset, map->size, dev); | ||
177 | else | ||
178 | map->handle = ioremap_wc(map->offset, map->size); | ||
175 | } | 179 | } |
176 | EXPORT_SYMBOL(drm_core_ioremap_wc); | 180 | EXPORT_SYMBOL(drm_core_ioremap_wc); |
181 | |||
177 | void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) | 182 | void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) |
178 | { | 183 | { |
179 | if (!map->handle || !map->size) | 184 | if (!map->handle || !map->size) |
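drm_core_ioremap_wc() now mirrors drm_core_ioremap(): on AGP chipsets whose aperture cannot be accessed directly it goes through agp_remap(), otherwise it falls back to ioremap_wc(). A hedged sketch of a caller (field usage matches the i915 ring-map conversion later in this patch; the helper name is illustrative):

/* Hedged sketch: map a ring with write-combining via the new helper. */
static int map_ring_wc(struct drm_device *dev, struct drm_map *map,
                       unsigned long base, unsigned long size)
{
        map->offset = base;              /* bus address of the ring */
        map->size = size;
        map->flags = 0;
        map->mtrr = 0;

        drm_core_ioremap_wc(map, dev);   /* picks agp_remap() or ioremap_wc() */
        return map->handle ? 0 : -ENOMEM;
}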
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 5ca132afa4f2..096e2a37446d 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -118,12 +118,20 @@ static void drm_master_destroy(struct kref *kref) | |||
118 | struct drm_master *master = container_of(kref, struct drm_master, refcount); | 118 | struct drm_master *master = container_of(kref, struct drm_master, refcount); |
119 | struct drm_magic_entry *pt, *next; | 119 | struct drm_magic_entry *pt, *next; |
120 | struct drm_device *dev = master->minor->dev; | 120 | struct drm_device *dev = master->minor->dev; |
121 | struct drm_map_list *r_list, *list_temp; | ||
121 | 122 | ||
122 | list_del(&master->head); | 123 | list_del(&master->head); |
123 | 124 | ||
124 | if (dev->driver->master_destroy) | 125 | if (dev->driver->master_destroy) |
125 | dev->driver->master_destroy(dev, master); | 126 | dev->driver->master_destroy(dev, master); |
126 | 127 | ||
128 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { | ||
129 | if (r_list->master == master) { | ||
130 | drm_rmmap_locked(dev, r_list->map); | ||
131 | r_list = NULL; | ||
132 | } | ||
133 | } | ||
134 | |||
127 | if (master->unique) { | 135 | if (master->unique) { |
128 | drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); | 136 | drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); |
129 | master->unique = NULL; | 137 | master->unique = NULL; |
@@ -138,14 +146,6 @@ static void drm_master_destroy(struct kref *kref) | |||
138 | 146 | ||
139 | drm_ht_remove(&master->magiclist); | 147 | drm_ht_remove(&master->magiclist); |
140 | 148 | ||
141 | if (master->lock.hw_lock) { | ||
142 | if (dev->sigdata.lock == master->lock.hw_lock) | ||
143 | dev->sigdata.lock = NULL; | ||
144 | master->lock.hw_lock = NULL; | ||
145 | master->lock.file_priv = NULL; | ||
146 | wake_up_interruptible(&master->lock.lock_queue); | ||
147 | } | ||
148 | |||
149 | drm_free(master, sizeof(*master), DRM_MEM_DRIVER); | 149 | drm_free(master, sizeof(*master), DRM_MEM_DRIVER); |
150 | } | 150 | } |
151 | 151 | ||
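The new loop in drm_master_destroy() tears down any maps still owned by the dying master; drm_rmmap_locked() unlinks and frees the entry being visited, which is why the _safe iterator is required. A hedged restatement of the idiom:

/* Hedged sketch: the _safe variant caches the next pointer, so the current
 * entry may be freed by the loop body (here, by drm_rmmap_locked()).
 */
struct drm_map_list *r_list, *list_temp;

list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
        if (r_list->master == master)
                drm_rmmap_locked(dev, r_list->map);
}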
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 62a4bf7b49df..6dab63bdc4c1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -177,6 +177,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
177 | drm_i915_private_t *dev_priv = dev->dev_private; | 177 | drm_i915_private_t *dev_priv = dev->dev_private; |
178 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 178 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
179 | 179 | ||
180 | master_priv->sarea = drm_getsarea(dev); | ||
181 | if (master_priv->sarea) { | ||
182 | master_priv->sarea_priv = (drm_i915_sarea_t *) | ||
183 | ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); | ||
184 | } else { | ||
185 | DRM_DEBUG("sarea not found, assuming DRI2 userspace\n"); | ||
186 | } | ||
187 | |||
180 | if (init->ring_size != 0) { | 188 | if (init->ring_size != 0) { |
181 | if (dev_priv->ring.ring_obj != NULL) { | 189 | if (dev_priv->ring.ring_obj != NULL) { |
182 | i915_dma_cleanup(dev); | 190 | i915_dma_cleanup(dev); |
@@ -194,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
194 | dev_priv->ring.map.flags = 0; | 202 | dev_priv->ring.map.flags = 0; |
195 | dev_priv->ring.map.mtrr = 0; | 203 | dev_priv->ring.map.mtrr = 0; |
196 | 204 | ||
197 | drm_core_ioremap(&dev_priv->ring.map, dev); | 205 | drm_core_ioremap_wc(&dev_priv->ring.map, dev); |
198 | 206 | ||
199 | if (dev_priv->ring.map.handle == NULL) { | 207 | if (dev_priv->ring.map.handle == NULL) { |
200 | i915_dma_cleanup(dev); | 208 | i915_dma_cleanup(dev); |
@@ -723,8 +731,11 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
723 | case I915_PARAM_HAS_GEM: | 731 | case I915_PARAM_HAS_GEM: |
724 | value = dev_priv->has_gem; | 732 | value = dev_priv->has_gem; |
725 | break; | 733 | break; |
734 | case I915_PARAM_NUM_FENCES_AVAIL: | ||
735 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | ||
736 | break; | ||
726 | default: | 737 | default: |
727 | DRM_ERROR("Unknown parameter %d\n", param->param); | 738 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
728 | return -EINVAL; | 739 | return -EINVAL; |
729 | } | 740 | } |
730 | 741 | ||
@@ -756,8 +767,15 @@ static int i915_setparam(struct drm_device *dev, void *data, | |||
756 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | 767 | case I915_SETPARAM_ALLOW_BATCHBUFFER: |
757 | dev_priv->allow_batchbuffer = param->value; | 768 | dev_priv->allow_batchbuffer = param->value; |
758 | break; | 769 | break; |
770 | case I915_SETPARAM_NUM_USED_FENCES: | ||
771 | if (param->value > dev_priv->num_fence_regs || | ||
772 | param->value < 0) | ||
773 | return -EINVAL; | ||
774 | /* Userspace can use first N regs */ | ||
775 | dev_priv->fence_reg_start = param->value; | ||
776 | break; | ||
759 | default: | 777 | default: |
760 | DRM_ERROR("unknown parameter %d\n", param->param); | 778 | DRM_DEBUG("unknown parameter %d\n", param->param); |
761 | return -EINVAL; | 779 | return -EINVAL; |
762 | } | 780 | } |
763 | 781 | ||
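The two new parameters let userspace ask how many fence registers are available to GEM and, conversely, tell the kernel how many the legacy DDX still programs itself. A hedged userspace sketch using libdrm (struct and ioctl names as declared in i915_drm.h; include paths assumed):

#include <xf86drm.h>
#include <drm/i915_drm.h>

static int query_fences(int fd, int *num_fences)
{
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_NUM_FENCES_AVAIL,
                .value = num_fences,
        };

        return drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
}

static int reserve_fences_for_ddx(int fd, int used_by_ddx)
{
        struct drm_i915_setparam sp = {
                .param = I915_SETPARAM_NUM_USED_FENCES,
                .value = used_by_ddx,   /* kernel keeps regs [0, value) away from GEM */
        };

        return drmCommandWrite(fd, DRM_I915_SETPARAM, &sp, sizeof(sp));
}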
@@ -793,7 +811,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
793 | dev_priv->hws_map.flags = 0; | 811 | dev_priv->hws_map.flags = 0; |
794 | dev_priv->hws_map.mtrr = 0; | 812 | dev_priv->hws_map.mtrr = 0; |
795 | 813 | ||
796 | drm_core_ioremap(&dev_priv->hws_map, dev); | 814 | drm_core_ioremap_wc(&dev_priv->hws_map, dev); |
797 | if (dev_priv->hws_map.handle == NULL) { | 815 | if (dev_priv->hws_map.handle == NULL) { |
798 | i915_dma_cleanup(dev); | 816 | i915_dma_cleanup(dev); |
799 | dev_priv->status_gfx_addr = 0; | 817 | dev_priv->status_gfx_addr = 0; |
@@ -936,13 +954,14 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
936 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & | 954 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & |
937 | 0xff000000; | 955 | 0xff000000; |
938 | 956 | ||
939 | DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base); | 957 | if (IS_MOBILE(dev) || IS_I9XX(dev)) |
940 | |||
941 | if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev))) | ||
942 | dev_priv->cursor_needs_physical = true; | 958 | dev_priv->cursor_needs_physical = true; |
943 | else | 959 | else |
944 | dev_priv->cursor_needs_physical = false; | 960 | dev_priv->cursor_needs_physical = false; |
945 | 961 | ||
962 | if (IS_I965G(dev) || IS_G33(dev)) | ||
963 | dev_priv->cursor_needs_physical = false; | ||
964 | |||
946 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size); | 965 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size); |
947 | if (ret) | 966 | if (ret) |
948 | goto kfree_devname; | 967 | goto kfree_devname; |
@@ -957,10 +976,6 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
957 | if (ret) | 976 | if (ret) |
958 | goto kfree_devname; | 977 | goto kfree_devname; |
959 | 978 | ||
960 | dev_priv->mm.gtt_mapping = | ||
961 | io_mapping_create_wc(dev->agp->base, | ||
962 | dev->agp->agp_info.aper_size * 1024*1024); | ||
963 | |||
964 | /* Allow hardware batchbuffers unless told otherwise. | 979 | /* Allow hardware batchbuffers unless told otherwise. |
965 | */ | 980 | */ |
966 | dev_priv->allow_batchbuffer = 1; | 981 | dev_priv->allow_batchbuffer = 1; |
@@ -1072,6 +1087,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1072 | goto free_priv; | 1087 | goto free_priv; |
1073 | } | 1088 | } |
1074 | 1089 | ||
1090 | dev_priv->mm.gtt_mapping = | ||
1091 | io_mapping_create_wc(dev->agp->base, | ||
1092 | dev->agp->agp_info.aper_size * 1024*1024); | ||
1093 | if (dev_priv->mm.gtt_mapping == NULL) { | ||
1094 | ret = -EIO; | ||
1095 | goto out_rmmap; | ||
1096 | } | ||
1097 | |||
1098 | /* Set up a WC MTRR for non-PAT systems. This is more common than | ||
1099 | * one would think, because the kernel disables PAT on first | ||
1100 | * generation Core chips because a WC PAT entry gets overridden by a UC | ||
1101 | * MTRR if one is present, and it does so even when no UC MTRR is present. | ||
1102 | */ | ||
1103 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | ||
1104 | dev->agp->agp_info.aper_size * | ||
1105 | 1024 * 1024, | ||
1106 | MTRR_TYPE_WRCOMB, 1); | ||
1107 | if (dev_priv->mm.gtt_mtrr < 0) { | ||
1108 | DRM_INFO("MTRR allocation failed. Graphics " | ||
1109 | "performance may suffer.\n"); | ||
1110 | } | ||
1111 | |||
1075 | #ifdef CONFIG_HIGHMEM64G | 1112 | #ifdef CONFIG_HIGHMEM64G |
1076 | /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ | 1113 | /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ |
1077 | dev_priv->has_gem = 0; | 1114 | dev_priv->has_gem = 0; |
@@ -1080,13 +1117,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1080 | dev_priv->has_gem = 1; | 1117 | dev_priv->has_gem = 1; |
1081 | #endif | 1118 | #endif |
1082 | 1119 | ||
1120 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
1121 | if (IS_GM45(dev)) | ||
1122 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | ||
1123 | |||
1083 | i915_gem_load(dev); | 1124 | i915_gem_load(dev); |
1084 | 1125 | ||
1085 | /* Init HWS */ | 1126 | /* Init HWS */ |
1086 | if (!I915_NEED_GFX_HWS(dev)) { | 1127 | if (!I915_NEED_GFX_HWS(dev)) { |
1087 | ret = i915_init_phys_hws(dev); | 1128 | ret = i915_init_phys_hws(dev); |
1088 | if (ret != 0) | 1129 | if (ret != 0) |
1089 | goto out_rmmap; | 1130 | goto out_iomapfree; |
1090 | } | 1131 | } |
1091 | 1132 | ||
1092 | /* On the 945G/GM, the chipset reports the MSI capability on the | 1133 | /* On the 945G/GM, the chipset reports the MSI capability on the |
@@ -1125,6 +1166,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1125 | 1166 | ||
1126 | return 0; | 1167 | return 0; |
1127 | 1168 | ||
1169 | out_iomapfree: | ||
1170 | io_mapping_free(dev_priv->mm.gtt_mapping); | ||
1128 | out_rmmap: | 1171 | out_rmmap: |
1129 | iounmap(dev_priv->regs); | 1172 | iounmap(dev_priv->regs); |
1130 | free_priv: | 1173 | free_priv: |
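The new out_iomapfree label keeps the load-time error unwinding in strict reverse order of setup: the io_mapping created just before must be released before the register mapping is undone. A hedged condensed view of the ordering (labels as in this function; base/size stand in for the AGP aperture arguments):

        dev_priv->mm.gtt_mapping = io_mapping_create_wc(base, size);
        if (dev_priv->mm.gtt_mapping == NULL) {
                ret = -EIO;
                goto out_rmmap;         /* only the registers are mapped so far */
        }

        ret = i915_init_phys_hws(dev);
        if (ret != 0)
                goto out_iomapfree;     /* undo the io_mapping first */

        /* ... remaining setup ... */

out_iomapfree:
        io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
        iounmap(dev_priv->regs);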
@@ -1136,8 +1179,14 @@ int i915_driver_unload(struct drm_device *dev) | |||
1136 | { | 1179 | { |
1137 | struct drm_i915_private *dev_priv = dev->dev_private; | 1180 | struct drm_i915_private *dev_priv = dev->dev_private; |
1138 | 1181 | ||
1182 | io_mapping_free(dev_priv->mm.gtt_mapping); | ||
1183 | if (dev_priv->mm.gtt_mtrr >= 0) { | ||
1184 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | ||
1185 | dev->agp->agp_info.aper_size * 1024 * 1024); | ||
1186 | dev_priv->mm.gtt_mtrr = -1; | ||
1187 | } | ||
1188 | |||
1139 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1189 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1140 | io_mapping_free(dev_priv->mm.gtt_mapping); | ||
1141 | drm_irq_uninstall(dev); | 1190 | drm_irq_uninstall(dev); |
1142 | } | 1191 | } |
1143 | 1192 | ||
@@ -1152,6 +1201,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
1152 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1201 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1153 | intel_modeset_cleanup(dev); | 1202 | intel_modeset_cleanup(dev); |
1154 | 1203 | ||
1204 | i915_gem_free_all_phys_object(dev); | ||
1205 | |||
1155 | mutex_lock(&dev->struct_mutex); | 1206 | mutex_lock(&dev->struct_mutex); |
1156 | i915_gem_cleanup_ringbuffer(dev); | 1207 | i915_gem_cleanup_ringbuffer(dev); |
1157 | mutex_unlock(&dev->struct_mutex); | 1208 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f8b3df0926c0..b293ef0bae71 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * | 27 | * |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/device.h> | ||
30 | #include "drmP.h" | 31 | #include "drmP.h" |
31 | #include "drm.h" | 32 | #include "drm.h" |
32 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
@@ -66,6 +67,14 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
66 | 67 | ||
67 | i915_save_state(dev); | 68 | i915_save_state(dev); |
68 | 69 | ||
70 | /* If KMS is active, we do the leavevt stuff here */ | ||
71 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
72 | if (i915_gem_idle(dev)) | ||
73 | dev_err(&dev->pdev->dev, | ||
74 | "GEM idle failed, resume may fail\n"); | ||
75 | drm_irq_uninstall(dev); | ||
76 | } | ||
77 | |||
69 | intel_opregion_free(dev); | 78 | intel_opregion_free(dev); |
70 | 79 | ||
71 | if (state.event == PM_EVENT_SUSPEND) { | 80 | if (state.event == PM_EVENT_SUSPEND) { |
@@ -79,6 +88,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
79 | 88 | ||
80 | static int i915_resume(struct drm_device *dev) | 89 | static int i915_resume(struct drm_device *dev) |
81 | { | 90 | { |
91 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
92 | int ret = 0; | ||
93 | |||
82 | pci_set_power_state(dev->pdev, PCI_D0); | 94 | pci_set_power_state(dev->pdev, PCI_D0); |
83 | pci_restore_state(dev->pdev); | 95 | pci_restore_state(dev->pdev); |
84 | if (pci_enable_device(dev->pdev)) | 96 | if (pci_enable_device(dev->pdev)) |
@@ -89,11 +101,26 @@ static int i915_resume(struct drm_device *dev) | |||
89 | 101 | ||
90 | intel_opregion_init(dev); | 102 | intel_opregion_init(dev); |
91 | 103 | ||
92 | return 0; | 104 | /* KMS EnterVT equivalent */ |
105 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
106 | mutex_lock(&dev->struct_mutex); | ||
107 | dev_priv->mm.suspended = 0; | ||
108 | |||
109 | ret = i915_gem_init_ringbuffer(dev); | ||
110 | if (ret != 0) | ||
111 | ret = -1; | ||
112 | mutex_unlock(&dev->struct_mutex); | ||
113 | |||
114 | drm_irq_install(dev); | ||
115 | } | ||
116 | |||
117 | return ret; | ||
93 | } | 118 | } |
94 | 119 | ||
95 | static struct vm_operations_struct i915_gem_vm_ops = { | 120 | static struct vm_operations_struct i915_gem_vm_ops = { |
96 | .fault = i915_gem_fault, | 121 | .fault = i915_gem_fault, |
122 | .open = drm_gem_vm_open, | ||
123 | .close = drm_gem_vm_close, | ||
97 | }; | 124 | }; |
98 | 125 | ||
99 | static struct drm_driver driver = { | 126 | static struct drm_driver driver = { |
@@ -112,7 +139,6 @@ static struct drm_driver driver = { | |||
112 | .suspend = i915_suspend, | 139 | .suspend = i915_suspend, |
113 | .resume = i915_resume, | 140 | .resume = i915_resume, |
114 | .device_is_agp = i915_driver_device_is_agp, | 141 | .device_is_agp = i915_driver_device_is_agp, |
115 | .get_vblank_counter = i915_get_vblank_counter, | ||
116 | .enable_vblank = i915_enable_vblank, | 142 | .enable_vblank = i915_enable_vblank, |
117 | .disable_vblank = i915_disable_vblank, | 143 | .disable_vblank = i915_disable_vblank, |
118 | .irq_preinstall = i915_driver_irq_preinstall, | 144 | .irq_preinstall = i915_driver_irq_preinstall, |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 563de18063fd..17fa40858d26 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -72,6 +72,18 @@ enum pipe { | |||
72 | #define WATCH_INACTIVE 0 | 72 | #define WATCH_INACTIVE 0 |
73 | #define WATCH_PWRITE 0 | 73 | #define WATCH_PWRITE 0 |
74 | 74 | ||
75 | #define I915_GEM_PHYS_CURSOR_0 1 | ||
76 | #define I915_GEM_PHYS_CURSOR_1 2 | ||
77 | #define I915_GEM_PHYS_OVERLAY_REGS 3 | ||
78 | #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) | ||
79 | |||
80 | struct drm_i915_gem_phys_object { | ||
81 | int id; | ||
82 | struct page **page_list; | ||
83 | drm_dma_handle_t *handle; | ||
84 | struct drm_gem_object *cur_obj; | ||
85 | }; | ||
86 | |||
75 | typedef struct _drm_i915_ring_buffer { | 87 | typedef struct _drm_i915_ring_buffer { |
76 | int tail_mask; | 88 | int tail_mask; |
77 | unsigned long Size; | 89 | unsigned long Size; |
@@ -172,6 +184,8 @@ typedef struct drm_i915_private { | |||
172 | unsigned int lvds_dither:1; | 184 | unsigned int lvds_dither:1; |
173 | unsigned int lvds_vbt:1; | 185 | unsigned int lvds_vbt:1; |
174 | unsigned int int_crt_support:1; | 186 | unsigned int int_crt_support:1; |
187 | unsigned int lvds_use_ssc:1; | ||
188 | int lvds_ssc_freq; | ||
175 | 189 | ||
176 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 190 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
177 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 191 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
@@ -272,6 +286,7 @@ typedef struct drm_i915_private { | |||
272 | struct drm_mm gtt_space; | 286 | struct drm_mm gtt_space; |
273 | 287 | ||
274 | struct io_mapping *gtt_mapping; | 288 | struct io_mapping *gtt_mapping; |
289 | int gtt_mtrr; | ||
275 | 290 | ||
276 | /** | 291 | /** |
277 | * List of objects currently involved in rendering from the | 292 | * List of objects currently involved in rendering from the |
@@ -358,6 +373,9 @@ typedef struct drm_i915_private { | |||
358 | uint32_t bit_6_swizzle_x; | 373 | uint32_t bit_6_swizzle_x; |
359 | /** Bit 6 swizzling required for Y tiling */ | 374 | /** Bit 6 swizzling required for Y tiling */ |
360 | uint32_t bit_6_swizzle_y; | 375 | uint32_t bit_6_swizzle_y; |
376 | |||
377 | /* storage for physical objects */ | ||
378 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | ||
361 | } mm; | 379 | } mm; |
362 | } drm_i915_private_t; | 380 | } drm_i915_private_t; |
363 | 381 | ||
@@ -436,6 +454,9 @@ struct drm_i915_gem_object { | |||
436 | /** User space pin count and filp owning the pin */ | 454 | /** User space pin count and filp owning the pin */ |
437 | uint32_t user_pin_count; | 455 | uint32_t user_pin_count; |
438 | struct drm_file *pin_filp; | 456 | struct drm_file *pin_filp; |
457 | |||
458 | /** for phy allocated objects */ | ||
459 | struct drm_i915_gem_phys_object *phys_obj; | ||
439 | }; | 460 | }; |
440 | 461 | ||
441 | /** | 462 | /** |
@@ -516,6 +537,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, | |||
516 | extern int i915_enable_vblank(struct drm_device *dev, int crtc); | 537 | extern int i915_enable_vblank(struct drm_device *dev, int crtc); |
517 | extern void i915_disable_vblank(struct drm_device *dev, int crtc); | 538 | extern void i915_disable_vblank(struct drm_device *dev, int crtc); |
518 | extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); | 539 | extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); |
540 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); | ||
519 | extern int i915_vblank_swap(struct drm_device *dev, void *data, | 541 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
520 | struct drm_file *file_priv); | 542 | struct drm_file *file_priv); |
521 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); | 543 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); |
@@ -583,6 +605,7 @@ int i915_gem_init_object(struct drm_gem_object *obj); | |||
583 | void i915_gem_free_object(struct drm_gem_object *obj); | 605 | void i915_gem_free_object(struct drm_gem_object *obj); |
584 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); | 606 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); |
585 | void i915_gem_object_unpin(struct drm_gem_object *obj); | 607 | void i915_gem_object_unpin(struct drm_gem_object *obj); |
608 | int i915_gem_object_unbind(struct drm_gem_object *obj); | ||
586 | void i915_gem_lastclose(struct drm_device *dev); | 609 | void i915_gem_lastclose(struct drm_device *dev); |
587 | uint32_t i915_get_gem_seqno(struct drm_device *dev); | 610 | uint32_t i915_get_gem_seqno(struct drm_device *dev); |
588 | void i915_gem_retire_requests(struct drm_device *dev); | 611 | void i915_gem_retire_requests(struct drm_device *dev); |
@@ -595,9 +618,15 @@ int i915_gem_init_ringbuffer(struct drm_device *dev); | |||
595 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 618 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
596 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 619 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
597 | unsigned long end); | 620 | unsigned long end); |
621 | int i915_gem_idle(struct drm_device *dev); | ||
598 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 622 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
599 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 623 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, |
600 | int write); | 624 | int write); |
625 | int i915_gem_attach_phys_object(struct drm_device *dev, | ||
626 | struct drm_gem_object *obj, int id); | ||
627 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
628 | struct drm_gem_object *obj); | ||
629 | void i915_gem_free_all_phys_object(struct drm_device *dev); | ||
601 | 630 | ||
602 | /* i915_gem_tiling.c */ | 631 | /* i915_gem_tiling.c */ |
603 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 632 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
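The phys-object interface declared above exists mainly for parts where the hardware cursor must live in physically contiguous memory (the cursor_needs_physical selection earlier in this patch). A hedged sketch of how a cursor path might choose between a normal GTT pin and a phys object; the function names are the ones declared above, the helper itself is illustrative:

/* Hedged sketch: back a cursor BO appropriately for the chipset. */
static int pin_cursor_bo(struct drm_device *dev, struct drm_gem_object *obj)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv->cursor_needs_physical)
                return i915_gem_object_pin(obj, PAGE_SIZE);

        /* Backs the object with a contiguous buffer (see the phys-object
         * allocation code added at the end of i915_gem.c). */
        return i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0);
}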
@@ -761,6 +790,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
761 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) | 790 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) |
762 | 791 | ||
763 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) | 792 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) |
793 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | ||
794 | * rows, which changed the alignment requirements and fence programming. | ||
795 | */ | ||
796 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ | ||
797 | IS_I915GM(dev))) | ||
764 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) | 798 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) |
765 | 799 | ||
766 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 800 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1384d6686555..85685bfd12da 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -34,10 +34,6 @@ | |||
34 | 34 | ||
35 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | 35 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
36 | 36 | ||
37 | static void | ||
38 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | ||
39 | uint32_t read_domains, | ||
40 | uint32_t write_domain); | ||
41 | static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); | 37 | static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); |
42 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 38 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); |
43 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 39 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); |
@@ -52,9 +48,12 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj); | |||
52 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | 48 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); |
53 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
54 | unsigned alignment); | 50 | unsigned alignment); |
55 | static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj); | 51 | static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write); |
56 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 52 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); |
57 | static int i915_gem_evict_something(struct drm_device *dev); | 53 | static int i915_gem_evict_something(struct drm_device *dev); |
54 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
55 | struct drm_i915_gem_pwrite *args, | ||
56 | struct drm_file *file_priv); | ||
58 | 57 | ||
59 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 58 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
60 | unsigned long end) | 59 | unsigned long end) |
@@ -386,8 +385,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
386 | * pread/pwrite currently are reading and writing from the CPU | 385 | * pread/pwrite currently are reading and writing from the CPU |
387 | * perspective, requiring manual detiling by the client. | 386 | * perspective, requiring manual detiling by the client. |
388 | */ | 387 | */ |
389 | if (obj_priv->tiling_mode == I915_TILING_NONE && | 388 | if (obj_priv->phys_obj) |
390 | dev->gtt_total != 0) | 389 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); |
390 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | ||
391 | dev->gtt_total != 0) | ||
391 | ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); | 392 | ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); |
392 | else | 393 | else |
393 | ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); | 394 | ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); |
@@ -562,6 +563,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
562 | pgoff_t page_offset; | 563 | pgoff_t page_offset; |
563 | unsigned long pfn; | 564 | unsigned long pfn; |
564 | int ret = 0; | 565 | int ret = 0; |
566 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | ||
565 | 567 | ||
566 | /* We don't use vmf->pgoff since that has the fake offset */ | 568 | /* We don't use vmf->pgoff since that has the fake offset */ |
567 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 569 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> |
@@ -580,8 +582,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
580 | 582 | ||
581 | /* Need a new fence register? */ | 583 | /* Need a new fence register? */ |
582 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | 584 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && |
583 | obj_priv->tiling_mode != I915_TILING_NONE) | 585 | obj_priv->tiling_mode != I915_TILING_NONE) { |
584 | i915_gem_object_get_fence_reg(obj); | 586 | ret = i915_gem_object_get_fence_reg(obj, write); |
587 | if (ret) { | ||
588 | mutex_unlock(&dev->struct_mutex); | ||
589 | return VM_FAULT_SIGBUS; | ||
590 | } | ||
591 | } | ||
585 | 592 | ||
586 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 593 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + |
587 | page_offset; | 594 | page_offset; |
@@ -596,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
596 | case -EAGAIN: | 603 | case -EAGAIN: |
597 | return VM_FAULT_OOM; | 604 | return VM_FAULT_OOM; |
598 | case -EFAULT: | 605 | case -EFAULT: |
599 | case -EBUSY: | ||
600 | DRM_ERROR("can't insert pfn?? fault or busy...\n"); | ||
601 | return VM_FAULT_SIGBUS; | 606 | return VM_FAULT_SIGBUS; |
602 | default: | 607 | default: |
603 | return VM_FAULT_NOPAGE; | 608 | return VM_FAULT_NOPAGE; |
@@ -673,6 +678,30 @@ out_free_list: | |||
673 | return ret; | 678 | return ret; |
674 | } | 679 | } |
675 | 680 | ||
681 | static void | ||
682 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | ||
683 | { | ||
684 | struct drm_device *dev = obj->dev; | ||
685 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
686 | struct drm_gem_mm *mm = dev->mm_private; | ||
687 | struct drm_map_list *list; | ||
688 | |||
689 | list = &obj->map_list; | ||
690 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | ||
691 | |||
692 | if (list->file_offset_node) { | ||
693 | drm_mm_put_block(list->file_offset_node); | ||
694 | list->file_offset_node = NULL; | ||
695 | } | ||
696 | |||
697 | if (list->map) { | ||
698 | drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER); | ||
699 | list->map = NULL; | ||
700 | } | ||
701 | |||
702 | obj_priv->mmap_offset = 0; | ||
703 | } | ||
704 | |||
676 | /** | 705 | /** |
677 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | 706 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object |
678 | * @obj: object to check | 707 | * @obj: object to check |
@@ -747,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
747 | 776 | ||
748 | if (!obj_priv->mmap_offset) { | 777 | if (!obj_priv->mmap_offset) { |
749 | ret = i915_gem_create_mmap_offset(obj); | 778 | ret = i915_gem_create_mmap_offset(obj); |
750 | if (ret) | 779 | if (ret) { |
780 | drm_gem_object_unreference(obj); | ||
781 | mutex_unlock(&dev->struct_mutex); | ||
751 | return ret; | 782 | return ret; |
783 | } | ||
752 | } | 784 | } |
753 | 785 | ||
754 | args->offset = obj_priv->mmap_offset; | 786 | args->offset = obj_priv->mmap_offset; |
@@ -1019,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
1019 | drm_i915_private_t *dev_priv = dev->dev_private; | 1051 | drm_i915_private_t *dev_priv = dev->dev_private; |
1020 | uint32_t seqno; | 1052 | uint32_t seqno; |
1021 | 1053 | ||
1054 | if (!dev_priv->hw_status_page) | ||
1055 | return; | ||
1056 | |||
1022 | seqno = i915_get_gem_seqno(dev); | 1057 | seqno = i915_get_gem_seqno(dev); |
1023 | 1058 | ||
1024 | while (!list_empty(&dev_priv->mm.request_list)) { | 1059 | while (!list_empty(&dev_priv->mm.request_list)) { |
@@ -1206,7 +1241,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) | |||
1206 | /** | 1241 | /** |
1207 | * Unbinds an object from the GTT aperture. | 1242 | * Unbinds an object from the GTT aperture. |
1208 | */ | 1243 | */ |
1209 | static int | 1244 | int |
1210 | i915_gem_object_unbind(struct drm_gem_object *obj) | 1245 | i915_gem_object_unbind(struct drm_gem_object *obj) |
1211 | { | 1246 | { |
1212 | struct drm_device *dev = obj->dev; | 1247 | struct drm_device *dev = obj->dev; |
@@ -1440,21 +1475,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
1440 | drm_i915_private_t *dev_priv = dev->dev_private; | 1475 | drm_i915_private_t *dev_priv = dev->dev_private; |
1441 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1476 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1442 | int regnum = obj_priv->fence_reg; | 1477 | int regnum = obj_priv->fence_reg; |
1478 | int tile_width; | ||
1443 | uint32_t val; | 1479 | uint32_t val; |
1444 | uint32_t pitch_val; | 1480 | uint32_t pitch_val; |
1445 | 1481 | ||
1446 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 1482 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || |
1447 | (obj_priv->gtt_offset & (obj->size - 1))) { | 1483 | (obj_priv->gtt_offset & (obj->size - 1))) { |
1448 | WARN(1, "%s: object not 1M or size aligned\n", __func__); | 1484 | WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", |
1485 | __func__, obj_priv->gtt_offset, obj->size); | ||
1449 | return; | 1486 | return; |
1450 | } | 1487 | } |
1451 | 1488 | ||
1452 | if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) || | 1489 | if (obj_priv->tiling_mode == I915_TILING_Y && |
1453 | IS_I945GM(dev) || | 1490 | HAS_128_BYTE_Y_TILING(dev)) |
1454 | IS_G33(dev))) | 1491 | tile_width = 128; |
1455 | pitch_val = (obj_priv->stride / 128) - 1; | ||
1456 | else | 1492 | else |
1457 | pitch_val = (obj_priv->stride / 512) - 1; | 1493 | tile_width = 512; |
1494 | |||
1495 | /* Note: pitch better be a power of two tile widths */ | ||
1496 | pitch_val = obj_priv->stride / tile_width; | ||
1497 | pitch_val = ffs(pitch_val) - 1; | ||
1458 | 1498 | ||
1459 | val = obj_priv->gtt_offset; | 1499 | val = obj_priv->gtt_offset; |
1460 | if (obj_priv->tiling_mode == I915_TILING_Y) | 1500 | if (obj_priv->tiling_mode == I915_TILING_Y) |
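The rewritten pitch math stores a log2 tile count rather than a divided byte count, so a worked example helps. A hedged illustration using the same ffs() idiom as the code above:

/* Hedged worked example of the encoding above:
 *   X tiling, stride 4096, tile_width 512 -> 4096/512 = 8 tiles per row,
 *   ffs(8) - 1 = 3 (i.e. log2 of the tile count).
 *   128-byte Y tiling, stride 2048 -> 2048/128 = 16, ffs(16) - 1 = 4.
 */
static uint32_t fence_pitch_val(uint32_t stride, int tile_width)
{
        return ffs(stride / tile_width) - 1;
}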
@@ -1478,7 +1518,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
1478 | 1518 | ||
1479 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 1519 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || |
1480 | (obj_priv->gtt_offset & (obj->size - 1))) { | 1520 | (obj_priv->gtt_offset & (obj->size - 1))) { |
1481 | WARN(1, "%s: object not 1M or size aligned\n", __func__); | 1521 | WARN(1, "%s: object 0x%08x not 1M or size aligned\n", |
1522 | __func__, obj_priv->gtt_offset); | ||
1482 | return; | 1523 | return; |
1483 | } | 1524 | } |
1484 | 1525 | ||
@@ -1498,6 +1539,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
1498 | /** | 1539 | /** |
1499 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | 1540 | * i915_gem_object_get_fence_reg - set up a fence reg for an object |
1500 | * @obj: object to map through a fence reg | 1541 | * @obj: object to map through a fence reg |
1542 | * @write: object is about to be written | ||
1501 | * | 1543 | * |
1502 | * When mapping objects through the GTT, userspace wants to be able to write | 1544 | * When mapping objects through the GTT, userspace wants to be able to write |
1503 | * to them without having to worry about swizzling if the object is tiled. | 1545 | * to them without having to worry about swizzling if the object is tiled. |
@@ -1508,8 +1550,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
1508 | * It then sets up the reg based on the object's properties: address, pitch | 1550 | * It then sets up the reg based on the object's properties: address, pitch |
1509 | * and tiling format. | 1551 | * and tiling format. |
1510 | */ | 1552 | */ |
1511 | static void | 1553 | static int |
1512 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | 1554 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) |
1513 | { | 1555 | { |
1514 | struct drm_device *dev = obj->dev; | 1556 | struct drm_device *dev = obj->dev; |
1515 | struct drm_i915_private *dev_priv = dev->dev_private; | 1557 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1522,12 +1564,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
1522 | WARN(1, "allocating a fence for non-tiled object?\n"); | 1564 | WARN(1, "allocating a fence for non-tiled object?\n"); |
1523 | break; | 1565 | break; |
1524 | case I915_TILING_X: | 1566 | case I915_TILING_X: |
1525 | WARN(obj_priv->stride & (512 - 1), | 1567 | if (!obj_priv->stride) |
1526 | "object is X tiled but has non-512B pitch\n"); | 1568 | return -EINVAL; |
1569 | WARN((obj_priv->stride & (512 - 1)), | ||
1570 | "object 0x%08x is X tiled but has non-512B pitch\n", | ||
1571 | obj_priv->gtt_offset); | ||
1527 | break; | 1572 | break; |
1528 | case I915_TILING_Y: | 1573 | case I915_TILING_Y: |
1529 | WARN(obj_priv->stride & (128 - 1), | 1574 | if (!obj_priv->stride) |
1530 | "object is Y tiled but has non-128B pitch\n"); | 1575 | return -EINVAL; |
1576 | WARN((obj_priv->stride & (128 - 1)), | ||
1577 | "object 0x%08x is Y tiled but has non-128B pitch\n", | ||
1578 | obj_priv->gtt_offset); | ||
1531 | break; | 1579 | break; |
1532 | } | 1580 | } |
1533 | 1581 | ||
@@ -1558,10 +1606,11 @@ try_again: | |||
1558 | * objects to finish before trying again. | 1606 | * objects to finish before trying again. |
1559 | */ | 1607 | */ |
1560 | if (i == dev_priv->num_fence_regs) { | 1608 | if (i == dev_priv->num_fence_regs) { |
1561 | ret = i915_gem_object_wait_rendering(reg->obj); | 1609 | ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0); |
1562 | if (ret) { | 1610 | if (ret) { |
1563 | WARN(ret, "wait_rendering failed: %d\n", ret); | 1611 | WARN(ret != -ERESTARTSYS, |
1564 | return; | 1612 | "switch to GTT domain failed: %d\n", ret); |
1613 | return ret; | ||
1565 | } | 1614 | } |
1566 | goto try_again; | 1615 | goto try_again; |
1567 | } | 1616 | } |
@@ -1586,6 +1635,8 @@ try_again: | |||
1586 | i915_write_fence_reg(reg); | 1635 | i915_write_fence_reg(reg); |
1587 | else | 1636 | else |
1588 | i830_write_fence_reg(reg); | 1637 | i830_write_fence_reg(reg); |
1638 | |||
1639 | return 0; | ||
1589 | } | 1640 | } |
1590 | 1641 | ||
1591 | /** | 1642 | /** |
@@ -1626,7 +1677,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
1626 | if (dev_priv->mm.suspended) | 1677 | if (dev_priv->mm.suspended) |
1627 | return -EBUSY; | 1678 | return -EBUSY; |
1628 | if (alignment == 0) | 1679 | if (alignment == 0) |
1629 | alignment = PAGE_SIZE; | 1680 | alignment = i915_gem_get_gtt_alignment(obj); |
1630 | if (alignment & (PAGE_SIZE - 1)) { | 1681 | if (alignment & (PAGE_SIZE - 1)) { |
1631 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); | 1682 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); |
1632 | return -EINVAL; | 1683 | return -EINVAL; |
@@ -1969,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
1969 | * drm_agp_chipset_flush | 2020 | * drm_agp_chipset_flush |
1970 | */ | 2021 | */ |
1971 | static void | 2022 | static void |
1972 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | 2023 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
1973 | uint32_t read_domains, | ||
1974 | uint32_t write_domain) | ||
1975 | { | 2024 | { |
1976 | struct drm_device *dev = obj->dev; | 2025 | struct drm_device *dev = obj->dev; |
1977 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2026 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1978 | uint32_t invalidate_domains = 0; | 2027 | uint32_t invalidate_domains = 0; |
1979 | uint32_t flush_domains = 0; | 2028 | uint32_t flush_domains = 0; |
1980 | 2029 | ||
1981 | BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); | 2030 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); |
1982 | BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); | 2031 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); |
1983 | 2032 | ||
1984 | #if WATCH_BUF | 2033 | #if WATCH_BUF |
1985 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", | 2034 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", |
1986 | __func__, obj, | 2035 | __func__, obj, |
1987 | obj->read_domains, read_domains, | 2036 | obj->read_domains, obj->pending_read_domains, |
1988 | obj->write_domain, write_domain); | 2037 | obj->write_domain, obj->pending_write_domain); |
1989 | #endif | 2038 | #endif |
1990 | /* | 2039 | /* |
1991 | * If the object isn't moving to a new write domain, | 2040 | * If the object isn't moving to a new write domain, |
1992 | * let the object stay in multiple read domains | 2041 | * let the object stay in multiple read domains |
1993 | */ | 2042 | */ |
1994 | if (write_domain == 0) | 2043 | if (obj->pending_write_domain == 0) |
1995 | read_domains |= obj->read_domains; | 2044 | obj->pending_read_domains |= obj->read_domains; |
1996 | else | 2045 | else |
1997 | obj_priv->dirty = 1; | 2046 | obj_priv->dirty = 1; |
1998 | 2047 | ||
@@ -2002,15 +2051,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
2002 | * any read domains which differ from the old | 2051 | * any read domains which differ from the old |
2003 | * write domain | 2052 | * write domain |
2004 | */ | 2053 | */ |
2005 | if (obj->write_domain && obj->write_domain != read_domains) { | 2054 | if (obj->write_domain && |
2055 | obj->write_domain != obj->pending_read_domains) { | ||
2006 | flush_domains |= obj->write_domain; | 2056 | flush_domains |= obj->write_domain; |
2007 | invalidate_domains |= read_domains & ~obj->write_domain; | 2057 | invalidate_domains |= |
2058 | obj->pending_read_domains & ~obj->write_domain; | ||
2008 | } | 2059 | } |
2009 | /* | 2060 | /* |
2010 | * Invalidate any read caches which may have | 2061 | * Invalidate any read caches which may have |
2011 | * stale data. That is, any new read domains. | 2062 | * stale data. That is, any new read domains. |
2012 | */ | 2063 | */ |
2013 | invalidate_domains |= read_domains & ~obj->read_domains; | 2064 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; |
2014 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { | 2065 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { |
2015 | #if WATCH_BUF | 2066 | #if WATCH_BUF |
2016 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", | 2067 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", |
@@ -2019,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
2019 | i915_gem_clflush_object(obj); | 2070 | i915_gem_clflush_object(obj); |
2020 | } | 2071 | } |
2021 | 2072 | ||
2022 | if ((write_domain | flush_domains) != 0) | 2073 | /* The actual obj->write_domain will be updated with |
2023 | obj->write_domain = write_domain; | 2074 | * pending_write_domain after we emit the accumulated flush for all |
2024 | obj->read_domains = read_domains; | 2075 | * of our domain changes in execbuffers (which clears objects' |
2076 | * write_domains). So if we have a current write domain that we | ||
2077 | * aren't changing, set pending_write_domain to that. | ||
2078 | */ | ||
2079 | if (flush_domains == 0 && obj->pending_write_domain == 0) | ||
2080 | obj->pending_write_domain = obj->write_domain; | ||
2081 | obj->read_domains = obj->pending_read_domains; | ||
2025 | 2082 | ||
2026 | dev->invalidate_domains |= invalidate_domains; | 2083 | dev->invalidate_domains |= invalidate_domains; |
2027 | dev->flush_domains |= flush_domains; | 2084 | dev->flush_domains |= flush_domains; |
@@ -2224,6 +2281,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2224 | (int) reloc.offset, | 2281 | (int) reloc.offset, |
2225 | reloc.read_domains, | 2282 | reloc.read_domains, |
2226 | reloc.write_domain); | 2283 | reloc.write_domain); |
2284 | drm_gem_object_unreference(target_obj); | ||
2285 | i915_gem_object_unpin(obj); | ||
2227 | return -EINVAL; | 2286 | return -EINVAL; |
2228 | } | 2287 | } |
2229 | 2288 | ||
@@ -2453,13 +2512,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2453 | if (dev_priv->mm.wedged) { | 2512 | if (dev_priv->mm.wedged) { |
2454 | DRM_ERROR("Execbuf while wedged\n"); | 2513 | DRM_ERROR("Execbuf while wedged\n"); |
2455 | mutex_unlock(&dev->struct_mutex); | 2514 | mutex_unlock(&dev->struct_mutex); |
2456 | return -EIO; | 2515 | ret = -EIO; |
2516 | goto pre_mutex_err; | ||
2457 | } | 2517 | } |
2458 | 2518 | ||
2459 | if (dev_priv->mm.suspended) { | 2519 | if (dev_priv->mm.suspended) { |
2460 | DRM_ERROR("Execbuf while VT-switched.\n"); | 2520 | DRM_ERROR("Execbuf while VT-switched.\n"); |
2461 | mutex_unlock(&dev->struct_mutex); | 2521 | mutex_unlock(&dev->struct_mutex); |
2462 | return -EBUSY; | 2522 | ret = -EBUSY; |
2523 | goto pre_mutex_err; | ||
2463 | } | 2524 | } |
2464 | 2525 | ||
2465 | /* Look up object handles */ | 2526 | /* Look up object handles */ |
@@ -2527,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2527 | struct drm_gem_object *obj = object_list[i]; | 2588 | struct drm_gem_object *obj = object_list[i]; |
2528 | 2589 | ||
2529 | /* Compute new gpu domains and update invalidate/flush */ | 2590 | /* Compute new gpu domains and update invalidate/flush */ |
2530 | i915_gem_object_set_to_gpu_domain(obj, | 2591 | i915_gem_object_set_to_gpu_domain(obj); |
2531 | obj->pending_read_domains, | ||
2532 | obj->pending_write_domain); | ||
2533 | } | 2592 | } |
2534 | 2593 | ||
2535 | i915_verify_inactive(dev, __FILE__, __LINE__); | 2594 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -2548,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2548 | (void)i915_add_request(dev, dev->flush_domains); | 2607 | (void)i915_add_request(dev, dev->flush_domains); |
2549 | } | 2608 | } |
2550 | 2609 | ||
2610 | for (i = 0; i < args->buffer_count; i++) { | ||
2611 | struct drm_gem_object *obj = object_list[i]; | ||
2612 | |||
2613 | obj->write_domain = obj->pending_write_domain; | ||
2614 | } | ||
2615 | |||
2551 | i915_verify_inactive(dev, __FILE__, __LINE__); | 2616 | i915_verify_inactive(dev, __FILE__, __LINE__); |
2552 | 2617 | ||
2553 | #if WATCH_COHERENCY | 2618 | #if WATCH_COHERENCY |
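Together with the pending-domain rework above, the execbuffer domain handling now follows a fixed sequence per batch. A hedged condensed view (helper names as used in this file; the flush and request are conditional on non-zero domains in the real code):

/* 1. relocations set obj->pending_read_domains / pending_write_domain
 * 2. set_to_gpu_domain() folds them into dev->invalidate_domains/flush_domains
 * 3. a single flush (and request) is emitted for the whole batch
 * 4. only then is write_domain committed, so the flush still sees the old one
 */
for (i = 0; i < args->buffer_count; i++)
        i915_gem_object_set_to_gpu_domain(object_list[i]);

i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains);

for (i = 0; i < args->buffer_count; i++)
        object_list[i]->write_domain = object_list[i]->pending_write_domain;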
@@ -2605,15 +2670,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2605 | 2670 | ||
2606 | i915_verify_inactive(dev, __FILE__, __LINE__); | 2671 | i915_verify_inactive(dev, __FILE__, __LINE__); |
2607 | 2672 | ||
2608 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
2609 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
2610 | (uintptr_t) args->buffers_ptr, | ||
2611 | exec_list, | ||
2612 | sizeof(*exec_list) * args->buffer_count); | ||
2613 | if (ret) | ||
2614 | DRM_ERROR("failed to copy %d exec entries " | ||
2615 | "back to user (%d)\n", | ||
2616 | args->buffer_count, ret); | ||
2617 | err: | 2673 | err: |
2618 | for (i = 0; i < pinned; i++) | 2674 | for (i = 0; i < pinned; i++) |
2619 | i915_gem_object_unpin(object_list[i]); | 2675 | i915_gem_object_unpin(object_list[i]); |
@@ -2623,6 +2679,18 @@ err: | |||
2623 | 2679 | ||
2624 | mutex_unlock(&dev->struct_mutex); | 2680 | mutex_unlock(&dev->struct_mutex); |
2625 | 2681 | ||
2682 | if (!ret) { | ||
2683 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
2684 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
2685 | (uintptr_t) args->buffers_ptr, | ||
2686 | exec_list, | ||
2687 | sizeof(*exec_list) * args->buffer_count); | ||
2688 | if (ret) | ||
2689 | DRM_ERROR("failed to copy %d exec entries " | ||
2690 | "back to user (%d)\n", | ||
2691 | args->buffer_count, ret); | ||
2692 | } | ||
2693 | |||
2626 | pre_mutex_err: | 2694 | pre_mutex_err: |
2627 | drm_free(object_list, sizeof(*object_list) * args->buffer_count, | 2695 | drm_free(object_list, sizeof(*object_list) * args->buffer_count, |
2628 | DRM_MEM_DRIVER); | 2696 | DRM_MEM_DRIVER); |
@@ -2647,6 +2715,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
2647 | DRM_ERROR("Failure to bind: %d", ret); | 2715 | DRM_ERROR("Failure to bind: %d", ret); |
2648 | return ret; | 2716 | return ret; |
2649 | } | 2717 | } |
2718 | /* | ||
2719 | * Pre-965 chips need a fence register set up in order to | ||
2720 | * properly handle tiled surfaces. | ||
2721 | */ | ||
2722 | if (!IS_I965G(dev) && | ||
2723 | obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
2724 | obj_priv->tiling_mode != I915_TILING_NONE) | ||
2725 | i915_gem_object_get_fence_reg(obj, true); | ||
2650 | } | 2726 | } |
2651 | obj_priv->pin_count++; | 2727 | obj_priv->pin_count++; |
2652 | 2728 | ||
@@ -2718,6 +2794,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
2718 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 2794 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { |
2719 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 2795 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
2720 | args->handle); | 2796 | args->handle); |
2797 | drm_gem_object_unreference(obj); | ||
2721 | mutex_unlock(&dev->struct_mutex); | 2798 | mutex_unlock(&dev->struct_mutex); |
2722 | return -EINVAL; | 2799 | return -EINVAL; |
2723 | } | 2800 | } |
@@ -2798,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
2798 | return -EBADF; | 2875 | return -EBADF; |
2799 | } | 2876 | } |
2800 | 2877 | ||
2878 | /* Update the active list for the hardware's current position. | ||
2879 | * Otherwise this only updates on a delayed timer or when irqs are | ||
2880 | * actually unmasked, and our working set ends up being larger than | ||
2881 | * required. | ||
2882 | */ | ||
2883 | i915_gem_retire_requests(dev); | ||
2884 | |||
2801 | obj_priv = obj->driver_private; | 2885 | obj_priv = obj->driver_private; |
2802 | /* Don't count being on the flushing list against the object being | 2886 | /* Don't count being on the flushing list against the object being |
2803 | * done. Otherwise, a buffer left on the flushing list but not getting | 2887 | * done. Otherwise, a buffer left on the flushing list but not getting |
@@ -2850,29 +2934,17 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
2850 | void i915_gem_free_object(struct drm_gem_object *obj) | 2934 | void i915_gem_free_object(struct drm_gem_object *obj) |
2851 | { | 2935 | { |
2852 | struct drm_device *dev = obj->dev; | 2936 | struct drm_device *dev = obj->dev; |
2853 | struct drm_gem_mm *mm = dev->mm_private; | ||
2854 | struct drm_map_list *list; | ||
2855 | struct drm_map *map; | ||
2856 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2937 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2857 | 2938 | ||
2858 | while (obj_priv->pin_count > 0) | 2939 | while (obj_priv->pin_count > 0) |
2859 | i915_gem_object_unpin(obj); | 2940 | i915_gem_object_unpin(obj); |
2860 | 2941 | ||
2861 | i915_gem_object_unbind(obj); | 2942 | if (obj_priv->phys_obj) |
2943 | i915_gem_detach_phys_object(dev, obj); | ||
2862 | 2944 | ||
2863 | list = &obj->map_list; | 2945 | i915_gem_object_unbind(obj); |
2864 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | ||
2865 | |||
2866 | if (list->file_offset_node) { | ||
2867 | drm_mm_put_block(list->file_offset_node); | ||
2868 | list->file_offset_node = NULL; | ||
2869 | } | ||
2870 | 2946 | ||
2871 | map = list->map; | 2947 | i915_gem_free_mmap_offset(obj); |
2872 | if (map) { | ||
2873 | drm_free(map, sizeof(*map), DRM_MEM_DRIVER); | ||
2874 | list->map = NULL; | ||
2875 | } | ||
2876 | 2948 | ||
2877 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); | 2949 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); |
2878 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); | 2950 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); |
@@ -2911,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) | |||
2911 | return 0; | 2983 | return 0; |
2912 | } | 2984 | } |
2913 | 2985 | ||
2914 | static int | 2986 | int |
2915 | i915_gem_idle(struct drm_device *dev) | 2987 | i915_gem_idle(struct drm_device *dev) |
2916 | { | 2988 | { |
2917 | drm_i915_private_t *dev_priv = dev->dev_private; | 2989 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -3057,6 +3129,7 @@ i915_gem_init_hws(struct drm_device *dev) | |||
3057 | if (dev_priv->hw_status_page == NULL) { | 3129 | if (dev_priv->hw_status_page == NULL) { |
3058 | DRM_ERROR("Failed to map status page.\n"); | 3130 | DRM_ERROR("Failed to map status page.\n"); |
3059 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 3131 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
3132 | i915_gem_object_unpin(obj); | ||
3060 | drm_gem_object_unreference(obj); | 3133 | drm_gem_object_unreference(obj); |
3061 | return -EINVAL; | 3134 | return -EINVAL; |
3062 | } | 3135 | } |
@@ -3069,6 +3142,31 @@ i915_gem_init_hws(struct drm_device *dev) | |||
3069 | return 0; | 3142 | return 0; |
3070 | } | 3143 | } |
3071 | 3144 | ||
3145 | static void | ||
3146 | i915_gem_cleanup_hws(struct drm_device *dev) | ||
3147 | { | ||
3148 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3149 | struct drm_gem_object *obj; | ||
3150 | struct drm_i915_gem_object *obj_priv; | ||
3151 | |||
3152 | if (dev_priv->hws_obj == NULL) | ||
3153 | return; | ||
3154 | |||
3155 | obj = dev_priv->hws_obj; | ||
3156 | obj_priv = obj->driver_private; | ||
3157 | |||
3158 | kunmap(obj_priv->page_list[0]); | ||
3159 | i915_gem_object_unpin(obj); | ||
3160 | drm_gem_object_unreference(obj); | ||
3161 | dev_priv->hws_obj = NULL; | ||
3162 | |||
3163 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
3164 | dev_priv->hw_status_page = NULL; | ||
3165 | |||
3166 | /* Write high address into HWS_PGA when disabling. */ | ||
3167 | I915_WRITE(HWS_PGA, 0x1ffff000); | ||
3168 | } | ||
3169 | |||
3072 | int | 3170 | int |
3073 | i915_gem_init_ringbuffer(struct drm_device *dev) | 3171 | i915_gem_init_ringbuffer(struct drm_device *dev) |
3074 | { | 3172 | { |
@@ -3086,6 +3184,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
3086 | obj = drm_gem_object_alloc(dev, 128 * 1024); | 3184 | obj = drm_gem_object_alloc(dev, 128 * 1024); |
3087 | if (obj == NULL) { | 3185 | if (obj == NULL) { |
3088 | DRM_ERROR("Failed to allocate ringbuffer\n"); | 3186 | DRM_ERROR("Failed to allocate ringbuffer\n"); |
3187 | i915_gem_cleanup_hws(dev); | ||
3089 | return -ENOMEM; | 3188 | return -ENOMEM; |
3090 | } | 3189 | } |
3091 | obj_priv = obj->driver_private; | 3190 | obj_priv = obj->driver_private; |
@@ -3093,6 +3192,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
3093 | ret = i915_gem_object_pin(obj, 4096); | 3192 | ret = i915_gem_object_pin(obj, 4096); |
3094 | if (ret != 0) { | 3193 | if (ret != 0) { |
3095 | drm_gem_object_unreference(obj); | 3194 | drm_gem_object_unreference(obj); |
3195 | i915_gem_cleanup_hws(dev); | ||
3096 | return ret; | 3196 | return ret; |
3097 | } | 3197 | } |
3098 | 3198 | ||
@@ -3110,7 +3210,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
3110 | if (ring->map.handle == NULL) { | 3210 | if (ring->map.handle == NULL) { |
3111 | DRM_ERROR("Failed to map ringbuffer.\n"); | 3211 | DRM_ERROR("Failed to map ringbuffer.\n"); |
3112 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | 3212 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); |
3213 | i915_gem_object_unpin(obj); | ||
3113 | drm_gem_object_unreference(obj); | 3214 | drm_gem_object_unreference(obj); |
3215 | i915_gem_cleanup_hws(dev); | ||
3114 | return -EINVAL; | 3216 | return -EINVAL; |
3115 | } | 3217 | } |
3116 | ring->ring_obj = obj; | 3218 | ring->ring_obj = obj; |
@@ -3190,20 +3292,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) | |||
3190 | dev_priv->ring.ring_obj = NULL; | 3292 | dev_priv->ring.ring_obj = NULL; |
3191 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | 3293 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); |
3192 | 3294 | ||
3193 | if (dev_priv->hws_obj != NULL) { | 3295 | i915_gem_cleanup_hws(dev); |
3194 | struct drm_gem_object *obj = dev_priv->hws_obj; | ||
3195 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3196 | |||
3197 | kunmap(obj_priv->page_list[0]); | ||
3198 | i915_gem_object_unpin(obj); | ||
3199 | drm_gem_object_unreference(obj); | ||
3200 | dev_priv->hws_obj = NULL; | ||
3201 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
3202 | dev_priv->hw_status_page = NULL; | ||
3203 | |||
3204 | /* Write high address into HWS_PGA when disabling. */ | ||
3205 | I915_WRITE(HWS_PGA, 0x1ffff000); | ||
3206 | } | ||
3207 | } | 3296 | } |
3208 | 3297 | ||
3209 | int | 3298 | int |
@@ -3221,10 +3310,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
3221 | dev_priv->mm.wedged = 0; | 3310 | dev_priv->mm.wedged = 0; |
3222 | } | 3311 | } |
3223 | 3312 | ||
3224 | dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base, | ||
3225 | dev->agp->agp_info.aper_size | ||
3226 | * 1024 * 1024); | ||
3227 | |||
3228 | mutex_lock(&dev->struct_mutex); | 3313 | mutex_lock(&dev->struct_mutex); |
3229 | dev_priv->mm.suspended = 0; | 3314 | dev_priv->mm.suspended = 0; |
3230 | 3315 | ||
@@ -3247,7 +3332,6 @@ int | |||
3247 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 3332 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
3248 | struct drm_file *file_priv) | 3333 | struct drm_file *file_priv) |
3249 | { | 3334 | { |
3250 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3251 | int ret; | 3335 | int ret; |
3252 | 3336 | ||
3253 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 3337 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
@@ -3256,7 +3340,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | |||
3256 | ret = i915_gem_idle(dev); | 3340 | ret = i915_gem_idle(dev); |
3257 | drm_irq_uninstall(dev); | 3341 | drm_irq_uninstall(dev); |
3258 | 3342 | ||
3259 | io_mapping_free(dev_priv->mm.gtt_mapping); | ||
3260 | return ret; | 3343 | return ret; |
3261 | } | 3344 | } |
3262 | 3345 | ||
@@ -3265,6 +3348,9 @@ i915_gem_lastclose(struct drm_device *dev) | |||
3265 | { | 3348 | { |
3266 | int ret; | 3349 | int ret; |
3267 | 3350 | ||
3351 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
3352 | return; | ||
3353 | |||
3268 | ret = i915_gem_idle(dev); | 3354 | ret = i915_gem_idle(dev); |
3269 | if (ret) | 3355 | if (ret) |
3270 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 3356 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
@@ -3286,10 +3372,187 @@ i915_gem_load(struct drm_device *dev) | |||
3286 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 3372 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
3287 | dev_priv->fence_reg_start = 3; | 3373 | dev_priv->fence_reg_start = 3; |
3288 | 3374 | ||
3289 | if (IS_I965G(dev)) | 3375 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
3290 | dev_priv->num_fence_regs = 16; | 3376 | dev_priv->num_fence_regs = 16; |
3291 | else | 3377 | else |
3292 | dev_priv->num_fence_regs = 8; | 3378 | dev_priv->num_fence_regs = 8; |
3293 | 3379 | ||
3294 | i915_gem_detect_bit_6_swizzle(dev); | 3380 | i915_gem_detect_bit_6_swizzle(dev); |
3295 | } | 3381 | } |
3382 | |||
3383 | /* | ||
3384 | * Create a physically contiguous memory object for this object | ||
3385 | * e.g. for cursor + overlay regs | ||
3386 | */ | ||
3387 | int i915_gem_init_phys_object(struct drm_device *dev, | ||
3388 | int id, int size) | ||
3389 | { | ||
3390 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3391 | struct drm_i915_gem_phys_object *phys_obj; | ||
3392 | int ret; | ||
3393 | |||
3394 | if (dev_priv->mm.phys_objs[id - 1] || !size) | ||
3395 | return 0; | ||
3396 | |||
3397 | phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); | ||
3398 | if (!phys_obj) | ||
3399 | return -ENOMEM; | ||
3400 | |||
3401 | phys_obj->id = id; | ||
3402 | |||
3403 | phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); | ||
3404 | if (!phys_obj->handle) { | ||
3405 | ret = -ENOMEM; | ||
3406 | goto kfree_obj; | ||
3407 | } | ||
3408 | #ifdef CONFIG_X86 | ||
3409 | set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | ||
3410 | #endif | ||
3411 | |||
3412 | dev_priv->mm.phys_objs[id - 1] = phys_obj; | ||
3413 | |||
3414 | return 0; | ||
3415 | kfree_obj: | ||
3416 | drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); | ||
3417 | return ret; | ||
3418 | } | ||
3419 | |||
3420 | void i915_gem_free_phys_object(struct drm_device *dev, int id) | ||
3421 | { | ||
3422 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3423 | struct drm_i915_gem_phys_object *phys_obj; | ||
3424 | |||
3425 | if (!dev_priv->mm.phys_objs[id - 1]) | ||
3426 | return; | ||
3427 | |||
3428 | phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
3429 | if (phys_obj->cur_obj) { | ||
3430 | i915_gem_detach_phys_object(dev, phys_obj->cur_obj); | ||
3431 | } | ||
3432 | |||
3433 | #ifdef CONFIG_X86 | ||
3434 | set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | ||
3435 | #endif | ||
3436 | drm_pci_free(dev, phys_obj->handle); | ||
3437 | kfree(phys_obj); | ||
3438 | dev_priv->mm.phys_objs[id - 1] = NULL; | ||
3439 | } | ||
3440 | |||
3441 | void i915_gem_free_all_phys_object(struct drm_device *dev) | ||
3442 | { | ||
3443 | int i; | ||
3444 | |||
3445 | for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) | ||
3446 | i915_gem_free_phys_object(dev, i); | ||
3447 | } | ||
3448 | |||
3449 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
3450 | struct drm_gem_object *obj) | ||
3451 | { | ||
3452 | struct drm_i915_gem_object *obj_priv; | ||
3453 | int i; | ||
3454 | int ret; | ||
3455 | int page_count; | ||
3456 | |||
3457 | obj_priv = obj->driver_private; | ||
3458 | if (!obj_priv->phys_obj) | ||
3459 | return; | ||
3460 | |||
3461 | ret = i915_gem_object_get_page_list(obj); | ||
3462 | if (ret) | ||
3463 | goto out; | ||
3464 | |||
3465 | page_count = obj->size / PAGE_SIZE; | ||
3466 | |||
3467 | for (i = 0; i < page_count; i++) { | ||
3468 | char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); | ||
3469 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
3470 | |||
3471 | memcpy(dst, src, PAGE_SIZE); | ||
3472 | kunmap_atomic(dst, KM_USER0); | ||
3473 | } | ||
3474 | drm_clflush_pages(obj_priv->page_list, page_count); | ||
3475 | drm_agp_chipset_flush(dev); | ||
3476 | out: | ||
3477 | obj_priv->phys_obj->cur_obj = NULL; | ||
3478 | obj_priv->phys_obj = NULL; | ||
3479 | } | ||
3480 | |||
3481 | int | ||
3482 | i915_gem_attach_phys_object(struct drm_device *dev, | ||
3483 | struct drm_gem_object *obj, int id) | ||
3484 | { | ||
3485 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3486 | struct drm_i915_gem_object *obj_priv; | ||
3487 | int ret = 0; | ||
3488 | int page_count; | ||
3489 | int i; | ||
3490 | |||
3491 | if (id > I915_MAX_PHYS_OBJECT) | ||
3492 | return -EINVAL; | ||
3493 | |||
3494 | obj_priv = obj->driver_private; | ||
3495 | |||
3496 | if (obj_priv->phys_obj) { | ||
3497 | if (obj_priv->phys_obj->id == id) | ||
3498 | return 0; | ||
3499 | i915_gem_detach_phys_object(dev, obj); | ||
3500 | } | ||
3501 | |||
3502 | |||
3503 | /* create a new object */ | ||
3504 | if (!dev_priv->mm.phys_objs[id - 1]) { | ||
3505 | ret = i915_gem_init_phys_object(dev, id, | ||
3506 | obj->size); | ||
3507 | if (ret) { | ||
3508 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | ||
3509 | goto out; | ||
3510 | } | ||
3511 | } | ||
3512 | |||
3513 | /* bind to the object */ | ||
3514 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
3515 | obj_priv->phys_obj->cur_obj = obj; | ||
3516 | |||
3517 | ret = i915_gem_object_get_page_list(obj); | ||
3518 | if (ret) { | ||
3519 | DRM_ERROR("failed to get page list\n"); | ||
3520 | goto out; | ||
3521 | } | ||
3522 | |||
3523 | page_count = obj->size / PAGE_SIZE; | ||
3524 | |||
3525 | for (i = 0; i < page_count; i++) { | ||
3526 | char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); | ||
3527 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
3528 | |||
3529 | memcpy(dst, src, PAGE_SIZE); | ||
3530 | kunmap_atomic(src, KM_USER0); | ||
3531 | } | ||
3532 | |||
3533 | return 0; | ||
3534 | out: | ||
3535 | return ret; | ||
3536 | } | ||
3537 | |||
3538 | static int | ||
3539 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
3540 | struct drm_i915_gem_pwrite *args, | ||
3541 | struct drm_file *file_priv) | ||
3542 | { | ||
3543 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3544 | void *obj_addr; | ||
3545 | int ret; | ||
3546 | char __user *user_data; | ||
3547 | |||
3548 | user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
3549 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
3550 | |||
3551 | DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); | ||
3552 | ret = copy_from_user(obj_addr, user_data, args->size); | ||
3553 | if (ret) | ||
3554 | return -EFAULT; | ||
3555 | |||
3556 | drm_agp_chipset_flush(dev); | ||
3557 | return 0; | ||
3558 | } | ||
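As a usage sketch of the new phys-object helpers above: only the function and field names (i915_gem_attach_phys_object(), i915_gem_detach_phys_object(), phys_obj->handle->busaddr, I915_GEM_PHYS_CURSOR_0) are taken from the patch; the surrounding caller is illustrative, not part of the change, and assumes dev->struct_mutex is held, as the cursor path in intel_display.c below does.

/* Illustrative only: bind a GEM object to physically contiguous memory,
 * hand its bus address to the hardware, then release it again. */
static int example_use_phys_object(struct drm_device *dev,
				   struct drm_gem_object *bo)
{
	struct drm_i915_gem_object *obj_priv = bo->driver_private;
	int ret;

	ret = i915_gem_attach_phys_object(dev, bo, I915_GEM_PHYS_CURSOR_0);
	if (ret)
		return ret;	/* the phys object is created lazily on first attach */

	/* program the hardware with obj_priv->phys_obj->handle->busaddr here */

	/* detaching copies the contents back into the object's shmem pages */
	i915_gem_detach_phys_object(dev, bo);
	return 0;
}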
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 241f39b7f460..7fb4191ef934 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -173,6 +173,73 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
173 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; | 173 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
174 | } | 174 | } |
175 | 175 | ||
176 | |||
177 | /** | ||
178 | * Returns the size of the fence for a tiled object of the given size. | ||
179 | */ | ||
180 | static int | ||
181 | i915_get_fence_size(struct drm_device *dev, int size) | ||
182 | { | ||
183 | int i; | ||
184 | int start; | ||
185 | |||
186 | if (IS_I965G(dev)) { | ||
187 | /* The 965 can have fences at any page boundary. */ | ||
188 | return ALIGN(size, 4096); | ||
189 | } else { | ||
190 | /* Align the size to a power of two greater than the smallest | ||
191 | * fence size. | ||
192 | */ | ||
193 | if (IS_I9XX(dev)) | ||
194 | start = 1024 * 1024; | ||
195 | else | ||
196 | start = 512 * 1024; | ||
197 | |||
198 | for (i = start; i < size; i <<= 1) | ||
199 | ; | ||
200 | |||
201 | return i; | ||
202 | } | ||
203 | } | ||
204 | |||
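The rounding above is easiest to see with numbers. A stand-alone sketch of the pre-965 case, assuming the 1 MiB minimum used for i9xx parts (not part of the patch):

#include <stdio.h>

/* Mirrors the pre-965 branch of i915_get_fence_size() above. */
static int fence_size_i9xx(int size)
{
	int i;

	for (i = 1024 * 1024; i < size; i <<= 1)
		;
	return i;
}

int main(void)
{
	/* A 300 KiB object still occupies a 1 MiB fence region;
	 * a 1.5 MiB object rounds up to 2 MiB. On 965 the fence is
	 * simply page-aligned instead. */
	printf("%d %d\n",
	       fence_size_i9xx(300 * 1024),
	       fence_size_i9xx(1536 * 1024));
	return 0;
}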
205 | /* Check pitch constraints for all chips & tiling formats */ | ||
206 | static bool | ||
207 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | ||
208 | { | ||
209 | int tile_width; | ||
210 | |||
211 | /* Linear is always fine */ | ||
212 | if (tiling_mode == I915_TILING_NONE) | ||
213 | return true; | ||
214 | |||
215 | if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) | ||
216 | tile_width = 128; | ||
217 | else | ||
218 | tile_width = 512; | ||
219 | |||
220 | /* 965+ just needs multiples of tile width */ | ||
221 | if (IS_I965G(dev)) { | ||
222 | if (stride & (tile_width - 1)) | ||
223 | return false; | ||
224 | return true; | ||
225 | } | ||
226 | |||
227 | /* Pre-965 needs power of two tile widths */ | ||
228 | if (stride < tile_width) | ||
229 | return false; | ||
230 | |||
231 | if (stride & (stride - 1)) | ||
232 | return false; | ||
233 | |||
234 | /* We don't handle the aperture area covered by the fence being bigger | ||
235 | * than the object size. | ||
236 | */ | ||
237 | if (i915_get_fence_size(dev, size) != size) | ||
238 | return false; | ||
239 | |||
240 | return true; | ||
241 | } | ||
242 | |||
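For reference, a minimal userspace model of the pre-965 X-tiling stride rule enforced above (one tile is 512 bytes wide there); the values are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Pre-965 X-tiled strides must be at least one tile wide and a power of two. */
static bool stride_ok_pre965_x(int stride)
{
	return stride >= 512 && (stride & (stride - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       stride_ok_pre965_x(512),   /* ok: exactly one tile       */
	       stride_ok_pre965_x(640),   /* rejected: not a power of 2 */
	       stride_ok_pre965_x(2048)); /* ok                         */
	return 0;
}

On 965 and later the stride only has to be a multiple of the tile width, so 640 would be accepted for X tiling there.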
176 | /** | 243 | /** |
177 | * Sets the tiling mode of an object, returning the required swizzling of | 244 | * Sets the tiling mode of an object, returning the required swizzling of |
178 | * bit 6 of addresses in the object. | 245 | * bit 6 of addresses in the object. |
@@ -191,6 +258,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
191 | return -EINVAL; | 258 | return -EINVAL; |
192 | obj_priv = obj->driver_private; | 259 | obj_priv = obj->driver_private; |
193 | 260 | ||
261 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | ||
262 | drm_gem_object_unreference(obj); | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | |||
194 | mutex_lock(&dev->struct_mutex); | 266 | mutex_lock(&dev->struct_mutex); |
195 | 267 | ||
196 | if (args->tiling_mode == I915_TILING_NONE) { | 268 | if (args->tiling_mode == I915_TILING_NONE) { |
@@ -207,12 +279,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
207 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; | 279 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
208 | } | 280 | } |
209 | } | 281 | } |
210 | obj_priv->tiling_mode = args->tiling_mode; | 282 | if (args->tiling_mode != obj_priv->tiling_mode) { |
211 | obj_priv->stride = args->stride; | 283 | int ret; |
212 | 284 | ||
213 | mutex_unlock(&dev->struct_mutex); | 285 | /* Unbind the object, as switching tiling means we're |
286 | * switching the cache organization due to fencing, probably. | ||
287 | */ | ||
288 | ret = i915_gem_object_unbind(obj); | ||
289 | if (ret != 0) { | ||
290 | WARN(ret != -ERESTARTSYS, | ||
291 | "failed to unbind object for tiling switch"); | ||
292 | args->tiling_mode = obj_priv->tiling_mode; | ||
293 | mutex_unlock(&dev->struct_mutex); | ||
294 | drm_gem_object_unreference(obj); | ||
295 | |||
296 | return ret; | ||
297 | } | ||
298 | obj_priv->tiling_mode = args->tiling_mode; | ||
299 | } | ||
300 | obj_priv->stride = args->stride; | ||
214 | 301 | ||
215 | drm_gem_object_unreference(obj); | 302 | drm_gem_object_unreference(obj); |
303 | mutex_unlock(&dev->struct_mutex); | ||
216 | 304 | ||
217 | return 0; | 305 | return 0; |
218 | } | 306 | } |
@@ -251,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
251 | DRM_ERROR("unknown tiling mode\n"); | 339 | DRM_ERROR("unknown tiling mode\n"); |
252 | } | 340 | } |
253 | 341 | ||
254 | mutex_unlock(&dev->struct_mutex); | ||
255 | |||
256 | drm_gem_object_unreference(obj); | 342 | drm_gem_object_unreference(obj); |
343 | mutex_unlock(&dev->struct_mutex); | ||
257 | 344 | ||
258 | return 0; | 345 | return 0; |
259 | } | 346 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0cadafbef411..87b6b603469e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -174,6 +174,19 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
174 | return count; | 174 | return count; |
175 | } | 175 | } |
176 | 176 | ||
177 | u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | ||
178 | { | ||
179 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
180 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | ||
181 | |||
182 | if (!i915_pipe_enabled(dev, pipe)) { | ||
183 | DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | return I915_READ(reg); | ||
188 | } | ||
189 | |||
177 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 190 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
178 | { | 191 | { |
179 | struct drm_device *dev = (struct drm_device *) arg; | 192 | struct drm_device *dev = (struct drm_device *) arg; |
@@ -370,12 +383,13 @@ int i915_irq_emit(struct drm_device *dev, void *data, | |||
370 | drm_i915_irq_emit_t *emit = data; | 383 | drm_i915_irq_emit_t *emit = data; |
371 | int result; | 384 | int result; |
372 | 385 | ||
373 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
374 | |||
375 | if (!dev_priv) { | 386 | if (!dev_priv) { |
376 | DRM_ERROR("called with no initialization\n"); | 387 | DRM_ERROR("called with no initialization\n"); |
377 | return -EINVAL; | 388 | return -EINVAL; |
378 | } | 389 | } |
390 | |||
391 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
392 | |||
379 | mutex_lock(&dev->struct_mutex); | 393 | mutex_lock(&dev->struct_mutex); |
380 | result = i915_emit_irq(dev); | 394 | result = i915_emit_irq(dev); |
381 | mutex_unlock(&dev->struct_mutex); | 395 | mutex_unlock(&dev->struct_mutex); |
@@ -411,6 +425,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
411 | { | 425 | { |
412 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 426 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
413 | unsigned long irqflags; | 427 | unsigned long irqflags; |
428 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
429 | u32 pipeconf; | ||
430 | |||
431 | pipeconf = I915_READ(pipeconf_reg); | ||
432 | if (!(pipeconf & PIPEACONF_ENABLE)) | ||
433 | return -EINVAL; | ||
414 | 434 | ||
415 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 435 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
416 | if (IS_I965G(dev)) | 436 | if (IS_I965G(dev)) |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 273162579e1b..9d6539a868b3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -186,12 +186,12 @@ | |||
186 | #define FENCE_REG_830_0 0x2000 | 186 | #define FENCE_REG_830_0 0x2000 |
187 | #define I830_FENCE_START_MASK 0x07f80000 | 187 | #define I830_FENCE_START_MASK 0x07f80000 |
188 | #define I830_FENCE_TILING_Y_SHIFT 12 | 188 | #define I830_FENCE_TILING_Y_SHIFT 12 |
189 | #define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8) | 189 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) |
190 | #define I830_FENCE_PITCH_SHIFT 4 | 190 | #define I830_FENCE_PITCH_SHIFT 4 |
191 | #define I830_FENCE_REG_VALID (1<<0) | 191 | #define I830_FENCE_REG_VALID (1<<0) |
192 | 192 | ||
193 | #define I915_FENCE_START_MASK 0x0ff00000 | 193 | #define I915_FENCE_START_MASK 0x0ff00000 |
194 | #define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8) | 194 | #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) |
195 | 195 | ||
196 | #define FENCE_REG_965_0 0x03000 | 196 | #define FENCE_REG_965_0 0x03000 |
197 | #define I965_FENCE_PITCH_SHIFT 2 | 197 | #define I965_FENCE_PITCH_SHIFT 2 |
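The get_order() to ffs() change matters for the smallest fences: with get_order() a 1 MiB region (size >> 20 == 1) encoded as -1. A quick stand-alone check of the new encoding, assuming the size is already a power-of-two multiple of 1 MiB as i915_get_fence_size() guarantees:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same arithmetic as I915_FENCE_SIZE_BITS() above. */
static unsigned int fence_size_bits(unsigned long size)
{
	return (unsigned int)(ffs(size >> 20) - 1) << 8;
}

int main(void)
{
	/* 1 MiB -> field 0, 2 MiB -> field 1, 4 MiB -> field 2 */
	printf("%u %u %u\n",
	       fence_size_bits(1UL << 20) >> 8,
	       fence_size_bits(1UL << 21) >> 8,
	       fence_size_bits(1UL << 22) >> 8);
	return 0;
}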
@@ -1371,6 +1371,9 @@ | |||
1371 | #define PIPE_FRAME_LOW_SHIFT 24 | 1371 | #define PIPE_FRAME_LOW_SHIFT 24 |
1372 | #define PIPE_PIXEL_MASK 0x00ffffff | 1372 | #define PIPE_PIXEL_MASK 0x00ffffff |
1373 | #define PIPE_PIXEL_SHIFT 0 | 1373 | #define PIPE_PIXEL_SHIFT 0 |
1374 | /* GM45+ just has to be different */ | ||
1375 | #define PIPEA_FRMCOUNT_GM45 0x70040 | ||
1376 | #define PIPEA_FLIPCOUNT_GM45 0x70044 | ||
1374 | 1377 | ||
1375 | /* Cursor A & B regs */ | 1378 | /* Cursor A & B regs */ |
1376 | #define CURACNTR 0x70080 | 1379 | #define CURACNTR 0x70080 |
@@ -1439,6 +1442,9 @@ | |||
1439 | #define PIPEBSTAT 0x71024 | 1442 | #define PIPEBSTAT 0x71024 |
1440 | #define PIPEBFRAMEHIGH 0x71040 | 1443 | #define PIPEBFRAMEHIGH 0x71040 |
1441 | #define PIPEBFRAMEPIXEL 0x71044 | 1444 | #define PIPEBFRAMEPIXEL 0x71044 |
1445 | #define PIPEB_FRMCOUNT_GM45 0x71040 | ||
1446 | #define PIPEB_FLIPCOUNT_GM45 0x71044 | ||
1447 | |||
1442 | 1448 | ||
1443 | /* Display B control */ | 1449 | /* Display B control */ |
1444 | #define DSPBCNTR 0x71180 | 1450 | #define DSPBCNTR 0x71180 |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 4ca82a025525..fc28e2bbd542 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -111,6 +111,12 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
111 | panel_fixed_mode->clock = dvo_timing->clock * 10; | 111 | panel_fixed_mode->clock = dvo_timing->clock * 10; |
112 | panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; | 112 | panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; |
113 | 113 | ||
114 | /* Some VBTs have bogus h/vtotal values */ | ||
115 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) | ||
116 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; | ||
117 | if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) | ||
118 | panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; | ||
119 | |||
114 | drm_mode_set_name(panel_fixed_mode); | 120 | drm_mode_set_name(panel_fixed_mode); |
115 | 121 | ||
116 | dev_priv->vbt_mode = panel_fixed_mode; | 122 | dev_priv->vbt_mode = panel_fixed_mode; |
@@ -135,6 +141,14 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
135 | if (general) { | 141 | if (general) { |
136 | dev_priv->int_tv_support = general->int_tv_support; | 142 | dev_priv->int_tv_support = general->int_tv_support; |
137 | dev_priv->int_crt_support = general->int_crt_support; | 143 | dev_priv->int_crt_support = general->int_crt_support; |
144 | dev_priv->lvds_use_ssc = general->enable_ssc; | ||
145 | |||
146 | if (dev_priv->lvds_use_ssc) { | ||
147 | if (IS_I855(dev_priv->dev)) | ||
148 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; | ||
149 | else | ||
150 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; | ||
151 | } | ||
138 | } | 152 | } |
139 | } | 153 | } |
140 | 154 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8ccb9c3ab868..a2834276cb38 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -90,12 +90,12 @@ typedef struct { | |||
90 | #define I9XX_DOT_MAX 400000 | 90 | #define I9XX_DOT_MAX 400000 |
91 | #define I9XX_VCO_MIN 1400000 | 91 | #define I9XX_VCO_MIN 1400000 |
92 | #define I9XX_VCO_MAX 2800000 | 92 | #define I9XX_VCO_MAX 2800000 |
93 | #define I9XX_N_MIN 3 | 93 | #define I9XX_N_MIN 1 |
94 | #define I9XX_N_MAX 8 | 94 | #define I9XX_N_MAX 6 |
95 | #define I9XX_M_MIN 70 | 95 | #define I9XX_M_MIN 70 |
96 | #define I9XX_M_MAX 120 | 96 | #define I9XX_M_MAX 120 |
97 | #define I9XX_M1_MIN 10 | 97 | #define I9XX_M1_MIN 10 |
98 | #define I9XX_M1_MAX 20 | 98 | #define I9XX_M1_MAX 22 |
99 | #define I9XX_M2_MIN 5 | 99 | #define I9XX_M2_MIN 5 |
100 | #define I9XX_M2_MAX 9 | 100 | #define I9XX_M2_MAX 9 |
101 | #define I9XX_P_SDVO_DAC_MIN 5 | 101 | #define I9XX_P_SDVO_DAC_MIN 5 |
@@ -189,19 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
189 | return limit; | 189 | return limit; |
190 | } | 190 | } |
191 | 191 | ||
192 | /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ | 192 | static void intel_clock(int refclk, intel_clock_t *clock) |
193 | |||
194 | static void i8xx_clock(int refclk, intel_clock_t *clock) | ||
195 | { | ||
196 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | ||
197 | clock->p = clock->p1 * clock->p2; | ||
198 | clock->vco = refclk * clock->m / (clock->n + 2); | ||
199 | clock->dot = clock->vco / clock->p; | ||
200 | } | ||
201 | |||
202 | /** Derive the pixel clock for the given refclk and divisors for 9xx chips. */ | ||
203 | |||
204 | static void i9xx_clock(int refclk, intel_clock_t *clock) | ||
205 | { | 193 | { |
206 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | 194 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
207 | clock->p = clock->p1 * clock->p2; | 195 | clock->p = clock->p1 * clock->p2; |
@@ -209,15 +197,6 @@ static void i9xx_clock(int refclk, intel_clock_t *clock) | |||
209 | clock->dot = clock->vco / clock->p; | 197 | clock->dot = clock->vco / clock->p; |
210 | } | 198 | } |
211 | 199 | ||
212 | static void intel_clock(struct drm_device *dev, int refclk, | ||
213 | intel_clock_t *clock) | ||
214 | { | ||
215 | if (IS_I9XX(dev)) | ||
216 | i9xx_clock (refclk, clock); | ||
217 | else | ||
218 | i8xx_clock (refclk, clock); | ||
219 | } | ||
220 | |||
221 | /** | 200 | /** |
222 | * Returns whether any output on the specified pipe is of the specified type | 201 | * Returns whether any output on the specified pipe is of the specified type |
223 | */ | 202 | */ |
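The unified intel_clock() above keeps the shared divisor maths: m = 5*(m1+2) + (m2+2), p = p1*p2, vco = refclk*m/(n+2), dot = vco/p. A stand-alone sketch with illustrative divisors, not taken from any VBT:

#include <stdio.h>

struct dpll { int m1, m2, n, p1, p2, m, p, vco, dot; };

/* Same arithmetic as intel_clock(); refclk and results are in kHz. */
static void dpll_calc(int refclk, struct dpll *c)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	c->vco = refclk * c->m / (c->n + 2);
	c->dot = c->vco / c->p;
}

int main(void)
{
	struct dpll c = { .m1 = 12, .m2 = 7, .n = 3, .p1 = 2, .p2 = 10 };

	dpll_calc(96000, &c);	/* 96 MHz i9xx reference clock */
	printf("vco %d kHz, dot %d kHz\n", c.vco, c.dot);	/* 1516800, 75840 */
	return 0;
}

The relaxed N and M1 bounds in this patch simply widen the divisor range that intel_find_best_PLL() is allowed to search with this formula.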
@@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
238 | return false; | 217 | return false; |
239 | } | 218 | } |
240 | 219 | ||
241 | #define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } | 220 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
242 | /** | 221 | /** |
243 | * Returns whether the given set of divisors are valid for a given refclk with | 222 | * Returns whether the given set of divisors are valid for a given refclk with |
244 | * the given connectors. | 223 | * the given connectors. |
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | |||
318 | clock.p1 <= limit->p1.max; clock.p1++) { | 297 | clock.p1 <= limit->p1.max; clock.p1++) { |
319 | int this_err; | 298 | int this_err; |
320 | 299 | ||
321 | intel_clock(dev, refclk, &clock); | 300 | intel_clock(refclk, &clock); |
322 | 301 | ||
323 | if (!intel_PLL_is_valid(crtc, &clock)) | 302 | if (!intel_PLL_is_valid(crtc, &clock)) |
324 | continue; | 303 | continue; |
@@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev) | |||
343 | udelay(20000); | 322 | udelay(20000); |
344 | } | 323 | } |
345 | 324 | ||
346 | static void | 325 | static int |
347 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | 326 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
348 | struct drm_framebuffer *old_fb) | 327 | struct drm_framebuffer *old_fb) |
349 | { | 328 | { |
@@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
361 | int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; | 340 | int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; |
362 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 341 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; |
363 | u32 dspcntr, alignment; | 342 | u32 dspcntr, alignment; |
343 | int ret; | ||
364 | 344 | ||
365 | /* no fb bound */ | 345 | /* no fb bound */ |
366 | if (!crtc->fb) { | 346 | if (!crtc->fb) { |
367 | DRM_DEBUG("No FB bound\n"); | 347 | DRM_DEBUG("No FB bound\n"); |
368 | return; | 348 | return 0; |
349 | } | ||
350 | |||
351 | switch (pipe) { | ||
352 | case 0: | ||
353 | case 1: | ||
354 | break; | ||
355 | default: | ||
356 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | ||
357 | return -EINVAL; | ||
369 | } | 358 | } |
370 | 359 | ||
371 | intel_fb = to_intel_framebuffer(crtc->fb); | 360 | intel_fb = to_intel_framebuffer(crtc->fb); |
@@ -377,30 +366,34 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
377 | alignment = 64 * 1024; | 366 | alignment = 64 * 1024; |
378 | break; | 367 | break; |
379 | case I915_TILING_X: | 368 | case I915_TILING_X: |
380 | if (IS_I9XX(dev)) | 369 | /* pin() will align the object as required by fence */ |
381 | alignment = 1024 * 1024; | 370 | alignment = 0; |
382 | else | ||
383 | alignment = 512 * 1024; | ||
384 | break; | 371 | break; |
385 | case I915_TILING_Y: | 372 | case I915_TILING_Y: |
386 | /* FIXME: Is this true? */ | 373 | /* FIXME: Is this true? */ |
387 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | 374 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); |
388 | return; | 375 | return -EINVAL; |
389 | default: | 376 | default: |
390 | BUG(); | 377 | BUG(); |
391 | } | 378 | } |
392 | 379 | ||
393 | if (i915_gem_object_pin(intel_fb->obj, alignment)) | 380 | mutex_lock(&dev->struct_mutex); |
394 | return; | 381 | ret = i915_gem_object_pin(intel_fb->obj, alignment); |
395 | 382 | if (ret != 0) { | |
396 | i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); | 383 | mutex_unlock(&dev->struct_mutex); |
397 | 384 | return ret; | |
398 | Start = obj_priv->gtt_offset; | 385 | } |
399 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
400 | 386 | ||
401 | I915_WRITE(dspstride, crtc->fb->pitch); | 387 | ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); |
388 | if (ret != 0) { | ||
389 | i915_gem_object_unpin(intel_fb->obj); | ||
390 | mutex_unlock(&dev->struct_mutex); | ||
391 | return ret; | ||
392 | } | ||
402 | 393 | ||
403 | dspcntr = I915_READ(dspcntr_reg); | 394 | dspcntr = I915_READ(dspcntr_reg); |
395 | /* Mask out pixel format bits in case we change it */ | ||
396 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | ||
404 | switch (crtc->fb->bits_per_pixel) { | 397 | switch (crtc->fb->bits_per_pixel) { |
405 | case 8: | 398 | case 8: |
406 | dspcntr |= DISPPLANE_8BPP; | 399 | dspcntr |= DISPPLANE_8BPP; |
@@ -417,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
417 | break; | 410 | break; |
418 | default: | 411 | default: |
419 | DRM_ERROR("Unknown color depth\n"); | 412 | DRM_ERROR("Unknown color depth\n"); |
420 | return; | 413 | i915_gem_object_unpin(intel_fb->obj); |
414 | mutex_unlock(&dev->struct_mutex); | ||
415 | return -EINVAL; | ||
421 | } | 416 | } |
422 | I915_WRITE(dspcntr_reg, dspcntr); | 417 | I915_WRITE(dspcntr_reg, dspcntr); |
423 | 418 | ||
419 | Start = obj_priv->gtt_offset; | ||
420 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
421 | |||
424 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | 422 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); |
423 | I915_WRITE(dspstride, crtc->fb->pitch); | ||
425 | if (IS_I965G(dev)) { | 424 | if (IS_I965G(dev)) { |
426 | I915_WRITE(dspbase, Offset); | 425 | I915_WRITE(dspbase, Offset); |
427 | I915_READ(dspbase); | 426 | I915_READ(dspbase); |
@@ -438,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
438 | intel_fb = to_intel_framebuffer(old_fb); | 437 | intel_fb = to_intel_framebuffer(old_fb); |
439 | i915_gem_object_unpin(intel_fb->obj); | 438 | i915_gem_object_unpin(intel_fb->obj); |
440 | } | 439 | } |
440 | mutex_unlock(&dev->struct_mutex); | ||
441 | 441 | ||
442 | if (!dev->primary->master) | 442 | if (!dev->primary->master) |
443 | return; | 443 | return 0; |
444 | 444 | ||
445 | master_priv = dev->primary->master->driver_priv; | 445 | master_priv = dev->primary->master->driver_priv; |
446 | if (!master_priv->sarea_priv) | 446 | if (!master_priv->sarea_priv) |
447 | return; | 447 | return 0; |
448 | 448 | ||
449 | switch (pipe) { | 449 | if (pipe) { |
450 | case 0: | ||
451 | master_priv->sarea_priv->pipeA_x = x; | ||
452 | master_priv->sarea_priv->pipeA_y = y; | ||
453 | break; | ||
454 | case 1: | ||
455 | master_priv->sarea_priv->pipeB_x = x; | 450 | master_priv->sarea_priv->pipeB_x = x; |
456 | master_priv->sarea_priv->pipeB_y = y; | 451 | master_priv->sarea_priv->pipeB_y = y; |
457 | break; | 452 | } else { |
458 | default: | 453 | master_priv->sarea_priv->pipeA_x = x; |
459 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 454 | master_priv->sarea_priv->pipeA_y = y; |
460 | break; | ||
461 | } | 455 | } |
456 | |||
457 | return 0; | ||
462 | } | 458 | } |
463 | 459 | ||
464 | 460 | ||
@@ -706,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev) | |||
706 | return 1; | 702 | return 1; |
707 | } | 703 | } |
708 | 704 | ||
709 | static void intel_crtc_mode_set(struct drm_crtc *crtc, | 705 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
710 | struct drm_display_mode *mode, | 706 | struct drm_display_mode *mode, |
711 | struct drm_display_mode *adjusted_mode, | 707 | struct drm_display_mode *adjusted_mode, |
712 | int x, int y, | 708 | int x, int y, |
713 | struct drm_framebuffer *old_fb) | 709 | struct drm_framebuffer *old_fb) |
714 | { | 710 | { |
715 | struct drm_device *dev = crtc->dev; | 711 | struct drm_device *dev = crtc->dev; |
716 | struct drm_i915_private *dev_priv = dev->dev_private; | 712 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -730,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
730 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; | 726 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; |
731 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; | 727 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; |
732 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 728 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
733 | int refclk; | 729 | int refclk, num_outputs = 0; |
734 | intel_clock_t clock; | 730 | intel_clock_t clock; |
735 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 731 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
736 | bool ok, is_sdvo = false, is_dvo = false; | 732 | bool ok, is_sdvo = false, is_dvo = false; |
737 | bool is_crt = false, is_lvds = false, is_tv = false; | 733 | bool is_crt = false, is_lvds = false, is_tv = false; |
738 | struct drm_mode_config *mode_config = &dev->mode_config; | 734 | struct drm_mode_config *mode_config = &dev->mode_config; |
739 | struct drm_connector *connector; | 735 | struct drm_connector *connector; |
736 | int ret; | ||
740 | 737 | ||
741 | drm_vblank_pre_modeset(dev, pipe); | 738 | drm_vblank_pre_modeset(dev, pipe); |
742 | 739 | ||
@@ -753,6 +750,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
753 | case INTEL_OUTPUT_SDVO: | 750 | case INTEL_OUTPUT_SDVO: |
754 | case INTEL_OUTPUT_HDMI: | 751 | case INTEL_OUTPUT_HDMI: |
755 | is_sdvo = true; | 752 | is_sdvo = true; |
753 | if (intel_output->needs_tv_clock) | ||
754 | is_tv = true; | ||
756 | break; | 755 | break; |
757 | case INTEL_OUTPUT_DVO: | 756 | case INTEL_OUTPUT_DVO: |
758 | is_dvo = true; | 757 | is_dvo = true; |
@@ -764,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
764 | is_crt = true; | 763 | is_crt = true; |
765 | break; | 764 | break; |
766 | } | 765 | } |
766 | |||
767 | num_outputs++; | ||
767 | } | 768 | } |
768 | 769 | ||
769 | if (IS_I9XX(dev)) { | 770 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { |
771 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
772 | DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); | ||
773 | } else if (IS_I9XX(dev)) { | ||
770 | refclk = 96000; | 774 | refclk = 96000; |
771 | } else { | 775 | } else { |
772 | refclk = 48000; | 776 | refclk = 48000; |
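A small model of the reference-clock choice above, combined with the SSC frequency table parsed from the VBT earlier in this diff (intel_bios.c); the non-SSC value assumes an i9xx part, and the whole helper is illustrative rather than part of the patch:

#include <stdio.h>

/* Returns the LVDS reference clock in kHz. */
static int lvds_refclk_khz(int is_855, int use_ssc, int ssc_freq_bit)
{
	int ssc_mhz;

	if (!use_ssc)
		return 96000;		/* i9xx default reference */

	if (is_855)
		ssc_mhz = ssc_freq_bit ? 66 : 48;
	else
		ssc_mhz = ssc_freq_bit ? 100 : 96;

	return ssc_mhz * 1000;
}

int main(void)
{
	/* 100000 kHz on a 9xx part with the SSC frequency bit set,
	 * 48000 kHz on an 855 with it clear. */
	printf("%d %d\n", lvds_refclk_khz(0, 1, 1), lvds_refclk_khz(1, 1, 0));
	return 0;
}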
@@ -775,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
775 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); | 779 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); |
776 | if (!ok) { | 780 | if (!ok) { |
777 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 781 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
778 | return; | 782 | return -EINVAL; |
779 | } | 783 | } |
780 | 784 | ||
781 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 785 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
@@ -825,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
825 | } | 829 | } |
826 | } | 830 | } |
827 | 831 | ||
828 | if (is_tv) { | 832 | if (is_sdvo && is_tv) |
833 | dpll |= PLL_REF_INPUT_TVCLKINBC; | ||
834 | else if (is_tv) | ||
829 | /* XXX: just matching BIOS for now */ | 835 | /* XXX: just matching BIOS for now */ |
830 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 836 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
831 | dpll |= 3; | 837 | dpll |= 3; |
832 | } | 838 | else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) |
839 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | ||
833 | else | 840 | else |
834 | dpll |= PLL_REF_INPUT_DREFCLK; | 841 | dpll |= PLL_REF_INPUT_DREFCLK; |
835 | 842 | ||
@@ -946,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
946 | I915_WRITE(dspcntr_reg, dspcntr); | 953 | I915_WRITE(dspcntr_reg, dspcntr); |
947 | 954 | ||
948 | /* Flush the plane changes */ | 955 | /* Flush the plane changes */ |
949 | intel_pipe_set_base(crtc, x, y, old_fb); | 956 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
957 | if (ret != 0) | ||
958 | return ret; | ||
950 | 959 | ||
951 | drm_vblank_post_modeset(dev, pipe); | 960 | drm_vblank_post_modeset(dev, pipe); |
961 | |||
962 | return 0; | ||
952 | } | 963 | } |
953 | 964 | ||
954 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ | 965 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
@@ -997,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
997 | temp = CURSOR_MODE_DISABLE; | 1008 | temp = CURSOR_MODE_DISABLE; |
998 | addr = 0; | 1009 | addr = 0; |
999 | bo = NULL; | 1010 | bo = NULL; |
1011 | mutex_lock(&dev->struct_mutex); | ||
1000 | goto finish; | 1012 | goto finish; |
1001 | } | 1013 | } |
1002 | 1014 | ||
@@ -1014,21 +1026,26 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
1014 | 1026 | ||
1015 | if (bo->size < width * height * 4) { | 1027 | if (bo->size < width * height * 4) { |
1016 | DRM_ERROR("buffer is too small\n"); | 1028 | DRM_ERROR("buffer is too small\n"); |
1017 | drm_gem_object_unreference(bo); | 1029 | ret = -ENOMEM; |
1018 | return -ENOMEM; | 1030 | goto fail; |
1019 | } | 1031 | } |
1020 | 1032 | ||
1021 | if (dev_priv->cursor_needs_physical) { | 1033 | /* we only need to pin inside GTT if cursor is non-phy */ |
1022 | addr = dev->agp->base + obj_priv->gtt_offset; | 1034 | mutex_lock(&dev->struct_mutex); |
1023 | } else { | 1035 | if (!dev_priv->cursor_needs_physical) { |
1036 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | ||
1037 | if (ret) { | ||
1038 | DRM_ERROR("failed to pin cursor bo\n"); | ||
1039 | goto fail_locked; | ||
1040 | } | ||
1024 | addr = obj_priv->gtt_offset; | 1041 | addr = obj_priv->gtt_offset; |
1025 | } | 1042 | } else { |
1026 | 1043 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | |
1027 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 1044 | if (ret) { |
1028 | if (ret) { | 1045 | DRM_ERROR("failed to attach phys object\n"); |
1029 | DRM_ERROR("failed to pin cursor bo\n"); | 1046 | goto fail_locked; |
1030 | drm_gem_object_unreference(bo); | 1047 | } |
1031 | return ret; | 1048 | addr = obj_priv->phys_obj->handle->busaddr; |
1032 | } | 1049 | } |
1033 | 1050 | ||
1034 | temp = 0; | 1051 | temp = 0; |
@@ -1041,14 +1058,25 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
1041 | I915_WRITE(base, addr); | 1058 | I915_WRITE(base, addr); |
1042 | 1059 | ||
1043 | if (intel_crtc->cursor_bo) { | 1060 | if (intel_crtc->cursor_bo) { |
1044 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 1061 | if (dev_priv->cursor_needs_physical) { |
1062 | if (intel_crtc->cursor_bo != bo) | ||
1063 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | ||
1064 | } else | ||
1065 | i915_gem_object_unpin(intel_crtc->cursor_bo); | ||
1045 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 1066 | drm_gem_object_unreference(intel_crtc->cursor_bo); |
1046 | } | 1067 | } |
1068 | mutex_unlock(&dev->struct_mutex); | ||
1047 | 1069 | ||
1048 | intel_crtc->cursor_addr = addr; | 1070 | intel_crtc->cursor_addr = addr; |
1049 | intel_crtc->cursor_bo = bo; | 1071 | intel_crtc->cursor_bo = bo; |
1050 | 1072 | ||
1051 | return 0; | 1073 | return 0; |
1074 | fail: | ||
1075 | mutex_lock(&dev->struct_mutex); | ||
1076 | fail_locked: | ||
1077 | drm_gem_object_unreference(bo); | ||
1078 | mutex_unlock(&dev->struct_mutex); | ||
1079 | return ret; | ||
1052 | } | 1080 | } |
1053 | 1081 | ||
1054 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 1082 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
@@ -1273,7 +1301,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1273 | } | 1301 | } |
1274 | 1302 | ||
1275 | /* XXX: Handle the 100Mhz refclk */ | 1303 | /* XXX: Handle the 100Mhz refclk */ |
1276 | i9xx_clock(96000, &clock); | 1304 | intel_clock(96000, &clock); |
1277 | } else { | 1305 | } else { |
1278 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | 1306 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); |
1279 | 1307 | ||
@@ -1285,9 +1313,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1285 | if ((dpll & PLL_REF_INPUT_MASK) == | 1313 | if ((dpll & PLL_REF_INPUT_MASK) == |
1286 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | 1314 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { |
1287 | /* XXX: might not be 66MHz */ | 1315 | /* XXX: might not be 66MHz */ |
1288 | i8xx_clock(66000, &clock); | 1316 | intel_clock(66000, &clock); |
1289 | } else | 1317 | } else |
1290 | i8xx_clock(48000, &clock); | 1318 | intel_clock(48000, &clock); |
1291 | } else { | 1319 | } else { |
1292 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | 1320 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
1293 | clock.p1 = 2; | 1321 | clock.p1 = 2; |
@@ -1300,7 +1328,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1300 | else | 1328 | else |
1301 | clock.p2 = 2; | 1329 | clock.p2 = 2; |
1302 | 1330 | ||
1303 | i8xx_clock(48000, &clock); | 1331 | intel_clock(48000, &clock); |
1304 | } | 1332 | } |
1305 | } | 1333 | } |
1306 | 1334 | ||
@@ -1435,6 +1463,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) | |||
1435 | 1463 | ||
1436 | static void intel_setup_outputs(struct drm_device *dev) | 1464 | static void intel_setup_outputs(struct drm_device *dev) |
1437 | { | 1465 | { |
1466 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1438 | struct drm_connector *connector; | 1467 | struct drm_connector *connector; |
1439 | 1468 | ||
1440 | intel_crt_init(dev); | 1469 | intel_crt_init(dev); |
@@ -1446,13 +1475,16 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
1446 | if (IS_I9XX(dev)) { | 1475 | if (IS_I9XX(dev)) { |
1447 | int found; | 1476 | int found; |
1448 | 1477 | ||
1449 | found = intel_sdvo_init(dev, SDVOB); | 1478 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
1450 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 1479 | found = intel_sdvo_init(dev, SDVOB); |
1451 | intel_hdmi_init(dev, SDVOB); | 1480 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
1452 | 1481 | intel_hdmi_init(dev, SDVOB); | |
1453 | found = intel_sdvo_init(dev, SDVOC); | 1482 | } |
1454 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 1483 | if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { |
1455 | intel_hdmi_init(dev, SDVOC); | 1484 | found = intel_sdvo_init(dev, SDVOC); |
1485 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | ||
1486 | intel_hdmi_init(dev, SDVOC); | ||
1487 | } | ||
1456 | } else | 1488 | } else |
1457 | intel_dvo_init(dev); | 1489 | intel_dvo_init(dev); |
1458 | 1490 | ||
@@ -1575,7 +1607,9 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
1575 | 1607 | ||
1576 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); | 1608 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); |
1577 | if (ret) { | 1609 | if (ret) { |
1610 | mutex_lock(&dev->struct_mutex); | ||
1578 | drm_gem_object_unreference(obj); | 1611 | drm_gem_object_unreference(obj); |
1612 | mutex_unlock(&dev->struct_mutex); | ||
1579 | return NULL; | 1613 | return NULL; |
1580 | } | 1614 | } |
1581 | 1615 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8a4cc50c5b4e..957daef8edff 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -82,6 +82,7 @@ struct intel_output { | |||
82 | struct intel_i2c_chan *i2c_bus; /* for control functions */ | 82 | struct intel_i2c_chan *i2c_bus; /* for control functions */ |
83 | struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ | 83 | struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ |
84 | bool load_detect_temp; | 84 | bool load_detect_temp; |
85 | bool needs_tv_clock; | ||
85 | void *dev_priv; | 86 | void *dev_priv; |
86 | }; | 87 | }; |
87 | 88 | ||
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index afd1217b8a02..b7f0ebe9f810 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
473 | ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); | 473 | ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); |
474 | if (ret) { | 474 | if (ret) { |
475 | DRM_ERROR("failed to allocate fb.\n"); | 475 | DRM_ERROR("failed to allocate fb.\n"); |
476 | goto out_unref; | 476 | goto out_unpin; |
477 | } | 477 | } |
478 | 478 | ||
479 | list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); | 479 | list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); |
@@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
484 | info = framebuffer_alloc(sizeof(struct intelfb_par), device); | 484 | info = framebuffer_alloc(sizeof(struct intelfb_par), device); |
485 | if (!info) { | 485 | if (!info) { |
486 | ret = -ENOMEM; | 486 | ret = -ENOMEM; |
487 | goto out_unref; | 487 | goto out_unpin; |
488 | } | 488 | } |
489 | 489 | ||
490 | par = info->par; | 490 | par = info->par; |
@@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
513 | size); | 513 | size); |
514 | if (!info->screen_base) { | 514 | if (!info->screen_base) { |
515 | ret = -ENOSPC; | 515 | ret = -ENOSPC; |
516 | goto out_unref; | 516 | goto out_unpin; |
517 | } | 517 | } |
518 | info->screen_size = size; | 518 | info->screen_size = size; |
519 | 519 | ||
@@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
608 | mutex_unlock(&dev->struct_mutex); | 608 | mutex_unlock(&dev->struct_mutex); |
609 | return 0; | 609 | return 0; |
610 | 610 | ||
611 | out_unpin: | ||
612 | i915_gem_object_unpin(fbo); | ||
611 | out_unref: | 613 | out_unref: |
612 | drm_gem_object_unreference(fbo); | 614 | drm_gem_object_unreference(fbo); |
613 | mutex_unlock(&dev->struct_mutex); | 615 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index a5a2f5339e9e..5ee9d4c25753 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -137,10 +137,6 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | |||
137 | chan->reg = reg; | 137 | chan->reg = reg; |
138 | snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); | 138 | snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); |
139 | chan->adapter.owner = THIS_MODULE; | 139 | chan->adapter.owner = THIS_MODULE; |
140 | #ifndef I2C_HW_B_INTELFB | ||
141 | #define I2C_HW_B_INTELFB I2C_HW_B_I810 | ||
142 | #endif | ||
143 | chan->adapter.id = I2C_HW_B_INTELFB; | ||
144 | chan->adapter.algo_data = &chan->algo; | 140 | chan->adapter.algo_data = &chan->algo; |
145 | chan->adapter.dev.parent = &dev->pdev->dev; | 141 | chan->adapter.dev.parent = &dev->pdev->dev; |
146 | chan->algo.setsda = set_data; | 142 | chan->algo.setsda = set_data; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ccecfaf6307b..0d211af98854 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * Jesse Barnes <jesse.barnes@intel.com> | 27 | * Jesse Barnes <jesse.barnes@intel.com> |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/dmi.h> | ||
30 | #include <linux/i2c.h> | 31 | #include <linux/i2c.h> |
31 | #include "drmP.h" | 32 | #include "drmP.h" |
32 | #include "drm.h" | 33 | #include "drm.h" |
@@ -311,10 +312,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
311 | if (dev_priv->panel_fixed_mode != NULL) { | 312 | if (dev_priv->panel_fixed_mode != NULL) { |
312 | struct drm_display_mode *mode; | 313 | struct drm_display_mode *mode; |
313 | 314 | ||
314 | mutex_unlock(&dev->mode_config.mutex); | ||
315 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 315 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
316 | drm_mode_probed_add(connector, mode); | 316 | drm_mode_probed_add(connector, mode); |
317 | mutex_unlock(&dev->mode_config.mutex); | ||
318 | 317 | ||
319 | return 1; | 318 | return 1; |
320 | } | 319 | } |
@@ -340,6 +339,18 @@ static void intel_lvds_destroy(struct drm_connector *connector) | |||
340 | kfree(connector); | 339 | kfree(connector); |
341 | } | 340 | } |
342 | 341 | ||
342 | static int intel_lvds_set_property(struct drm_connector *connector, | ||
343 | struct drm_property *property, | ||
344 | uint64_t value) | ||
345 | { | ||
346 | struct drm_device *dev = connector->dev; | ||
347 | |||
348 | if (property == dev->mode_config.dpms_property && connector->encoder) | ||
349 | intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf)); | ||
350 | |||
351 | return 0; | ||
352 | } | ||
353 | |||
343 | static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { | 354 | static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { |
344 | .dpms = intel_lvds_dpms, | 355 | .dpms = intel_lvds_dpms, |
345 | .mode_fixup = intel_lvds_mode_fixup, | 356 | .mode_fixup = intel_lvds_mode_fixup, |
@@ -359,6 +370,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { | |||
359 | .restore = intel_lvds_restore, | 370 | .restore = intel_lvds_restore, |
360 | .detect = intel_lvds_detect, | 371 | .detect = intel_lvds_detect, |
361 | .fill_modes = drm_helper_probe_single_connector_modes, | 372 | .fill_modes = drm_helper_probe_single_connector_modes, |
373 | .set_property = intel_lvds_set_property, | ||
362 | .destroy = intel_lvds_destroy, | 374 | .destroy = intel_lvds_destroy, |
363 | }; | 375 | }; |
364 | 376 | ||
@@ -392,6 +404,16 @@ void intel_lvds_init(struct drm_device *dev) | |||
392 | u32 lvds; | 404 | u32 lvds; |
393 | int pipe; | 405 | int pipe; |
394 | 406 | ||
407 | /* Blacklist machines that we know falsely report LVDS. */ | ||
408 | /* FIXME: add a check for the Aopen Mini PC */ | ||
409 | |||
410 | /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */ | ||
411 | if(dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") || | ||
412 | dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) { | ||
413 | DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n"); | ||
414 | return; | ||
415 | } | ||
416 | |||
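The FIXME above could be handled the same way once the right strings are known; a hypothetical sketch follows, where the product string is a placeholder and only dmi_match() and the DMI field names are real kernel identifiers:

	/* AOpen Mini PC (hypothetical match; strings must be confirmed
	 * against real DMI data before this could be merged) */
	if (dmi_match(DMI_SYS_VENDOR, "AOpen") &&
	    dmi_match(DMI_PRODUCT_NAME, "placeholder-product-string")) {
		DRM_DEBUG("Skipping LVDS initialization for AOpen Mini PC\n");
		return;
	}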
395 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 417 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); |
396 | if (!intel_output) { | 418 | if (!intel_output) { |
397 | return; | 419 | return; |
@@ -445,7 +467,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
445 | dev_priv->panel_fixed_mode = | 467 | dev_priv->panel_fixed_mode = |
446 | drm_mode_duplicate(dev, scan); | 468 | drm_mode_duplicate(dev, scan); |
447 | mutex_unlock(&dev->mode_config.mutex); | 469 | mutex_unlock(&dev->mode_config.mutex); |
448 | goto out; /* FIXME: check for quirks */ | 470 | goto out; |
449 | } | 471 | } |
450 | mutex_unlock(&dev->mode_config.mutex); | 472 | mutex_unlock(&dev->mode_config.mutex); |
451 | } | 473 | } |
@@ -456,6 +478,11 @@ void intel_lvds_init(struct drm_device *dev) | |||
456 | dev_priv->panel_fixed_mode = | 478 | dev_priv->panel_fixed_mode = |
457 | drm_mode_duplicate(dev, dev_priv->vbt_mode); | 479 | drm_mode_duplicate(dev, dev_priv->vbt_mode); |
458 | mutex_unlock(&dev->mode_config.mutex); | 480 | mutex_unlock(&dev->mode_config.mutex); |
481 | if (dev_priv->panel_fixed_mode) { | ||
482 | dev_priv->panel_fixed_mode->type |= | ||
483 | DRM_MODE_TYPE_PREFERRED; | ||
484 | goto out; | ||
485 | } | ||
459 | } | 486 | } |
460 | 487 | ||
461 | /* | 488 | /* |
@@ -472,7 +499,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
472 | if (dev_priv->panel_fixed_mode) { | 499 | if (dev_priv->panel_fixed_mode) { |
473 | dev_priv->panel_fixed_mode->type |= | 500 | dev_priv->panel_fixed_mode->type |= |
474 | DRM_MODE_TYPE_PREFERRED; | 501 | DRM_MODE_TYPE_PREFERRED; |
475 | goto out; /* FIXME: check for quirks */ | 502 | goto out; |
476 | } | 503 | } |
477 | } | 504 | } |
478 | 505 | ||
@@ -480,38 +507,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
480 | if (!dev_priv->panel_fixed_mode) | 507 | if (!dev_priv->panel_fixed_mode) |
481 | goto failed; | 508 | goto failed; |
482 | 509 | ||
483 | /* FIXME: detect aopen & mac mini type stuff automatically? */ | ||
484 | /* | ||
485 | * Blacklist machines with BIOSes that list an LVDS panel without | ||
486 | * actually having one. | ||
487 | */ | ||
488 | if (IS_I945GM(dev)) { | ||
489 | /* aopen mini pc */ | ||
490 | if (dev->pdev->subsystem_vendor == 0xa0a0) | ||
491 | goto failed; | ||
492 | |||
493 | if ((dev->pdev->subsystem_vendor == 0x8086) && | ||
494 | (dev->pdev->subsystem_device == 0x7270)) { | ||
495 | /* It's a Mac Mini or Macbook Pro. | ||
496 | * | ||
497 | * Apple hardware is out to get us. The macbook pro | ||
498 | * has a real LVDS panel, but the mac mini does not, | ||
499 | * and they have the same device IDs. We'll | ||
500 | * distinguish by panel size, on the assumption | ||
501 | * that Apple isn't about to make any machines with an | ||
502 | * 800x600 display. | ||
503 | */ | ||
504 | |||
505 | if (dev_priv->panel_fixed_mode != NULL && | ||
506 | dev_priv->panel_fixed_mode->hdisplay == 800 && | ||
507 | dev_priv->panel_fixed_mode->vdisplay == 600) { | ||
508 | DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n"); | ||
509 | goto failed; | ||
510 | } | ||
511 | } | ||
512 | } | ||
513 | |||
514 | |||
515 | out: | 510 | out: |
516 | drm_sysfs_connector_add(connector); | 511 | drm_sysfs_connector_add(connector); |
517 | return; | 512 | return; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 407215469102..fbe6f3931b1b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -40,13 +40,59 @@ | |||
40 | struct intel_sdvo_priv { | 40 | struct intel_sdvo_priv { |
41 | struct intel_i2c_chan *i2c_bus; | 41 | struct intel_i2c_chan *i2c_bus; |
42 | int slaveaddr; | 42 | int slaveaddr; |
43 | |||
44 | /* Register for the SDVO device: SDVOB or SDVOC */ | ||
43 | int output_device; | 45 | int output_device; |
44 | 46 | ||
45 | u16 active_outputs; | 47 | /* Active outputs controlled by this SDVO output */ |
48 | uint16_t controlled_output; | ||
46 | 49 | ||
50 | /* | ||
51 | * Capabilities of the SDVO device returned by | ||
52 | * i830_sdvo_get_capabilities() | ||
53 | */ | ||
47 | struct intel_sdvo_caps caps; | 54 | struct intel_sdvo_caps caps; |
55 | |||
56 | /* Pixel clock limitations reported by the SDVO device, in kHz */ | ||
48 | int pixel_clock_min, pixel_clock_max; | 57 | int pixel_clock_min, pixel_clock_max; |
49 | 58 | ||
59 | /** | ||
60 | * This is set if we're going to treat the device as TV-out. | ||
61 | * | ||
62 | * While we have these nice friendly flags for output types that ought | ||
63 | * to decide this for us, the S-Video output on our HDMI+S-Video card | ||
64 | * shows up as RGB1 (VGA). | ||
65 | */ | ||
66 | bool is_tv; | ||
67 | |||
68 | /** | ||
69 | * This is set if we treat the device as HDMI, instead of DVI. | ||
70 | */ | ||
71 | bool is_hdmi; | ||
72 | |||
73 | /** | ||
74 | * Returned SDTV resolutions allowed for the current format, if the | ||
75 | * device reported it. | ||
76 | */ | ||
77 | struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; | ||
78 | |||
79 | /** | ||
80 | * Current selected TV format. | ||
81 | * | ||
82 | * This is stored in the same structure that's passed to the device, for | ||
83 | * convenience. | ||
84 | */ | ||
85 | struct intel_sdvo_tv_format tv_format; | ||
86 | |||
87 | /* | ||
88 | * supported encoding mode, used to determine whether HDMI is | ||
89 | * supported | ||
90 | */ | ||
91 | struct intel_sdvo_encode encode; | ||
92 | |||
93 | /* DDC bus used by this SDVO output */ | ||
94 | uint8_t ddc_bus; | ||
95 | |||
50 | int save_sdvo_mult; | 96 | int save_sdvo_mult; |
51 | u16 save_active_outputs; | 97 | u16 save_active_outputs; |
52 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; | 98 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; |
@@ -147,9 +193,9 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
147 | 193 | ||
148 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} | 194 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} |
149 | /** Mapping of command numbers to names, for debug output */ | 195 | /** Mapping of command numbers to names, for debug output */ |
150 | const static struct _sdvo_cmd_name { | 196 | static const struct _sdvo_cmd_name { |
151 | u8 cmd; | 197 | u8 cmd; |
152 | char *name; | 198 | char *name; |
153 | } sdvo_cmd_names[] = { | 199 | } sdvo_cmd_names[] = { |
154 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), | 200 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), |
155 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), | 201 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), |
@@ -186,8 +232,35 @@ const static struct _sdvo_cmd_name { | |||
186 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), | 232 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), |
187 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), | 233 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), |
188 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), | 234 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), |
189 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), | 235 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), |
236 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), | ||
237 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), | ||
238 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), | ||
190 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), | 239 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), |
240 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), | ||
241 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), | ||
242 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), | ||
243 | /* HDMI op code */ | ||
244 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), | ||
245 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), | ||
246 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), | ||
247 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), | ||
248 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), | ||
249 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), | ||
250 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), | ||
251 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), | ||
252 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), | ||
253 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), | ||
254 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), | ||
255 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), | ||
256 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), | ||
257 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), | ||
258 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), | ||
259 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), | ||
260 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), | ||
261 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), | ||
262 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), | ||
263 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | ||
191 | }; | 264 | }; |
192 | 265 | ||
193 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") | 266 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") |
@@ -506,6 +579,50 @@ static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, | |||
506 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); | 579 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); |
507 | } | 580 | } |
508 | 581 | ||
582 | static bool | ||
583 | intel_sdvo_create_preferred_input_timing(struct intel_output *output, | ||
584 | uint16_t clock, | ||
585 | uint16_t width, | ||
586 | uint16_t height) | ||
587 | { | ||
588 | struct intel_sdvo_preferred_input_timing_args args; | ||
589 | uint8_t status; | ||
590 | |||
591 | args.clock = clock; | ||
592 | args.width = width; | ||
593 | args.height = height; | ||
594 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | ||
595 | &args, sizeof(args)); | ||
596 | status = intel_sdvo_read_response(output, NULL, 0); | ||
597 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
598 | return false; | ||
599 | |||
600 | return true; | ||
601 | } | ||
602 | |||
603 | static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | ||
604 | struct intel_sdvo_dtd *dtd) | ||
605 | { | ||
606 | bool status; | ||
607 | |||
608 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | ||
609 | NULL, 0); | ||
610 | |||
611 | status = intel_sdvo_read_response(output, &dtd->part1, | ||
612 | sizeof(dtd->part1)); | ||
613 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
614 | return false; | ||
615 | |||
616 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | ||
617 | NULL, 0); | ||
618 | |||
619 | status = intel_sdvo_read_response(output, &dtd->part2, | ||
620 | sizeof(dtd->part2)); | ||
621 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
622 | return false; | ||
623 | |||
624 | return true; | ||
625 | } | ||
509 | 626 | ||
510 | static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | 627 | static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) |
511 | { | 628 | { |
@@ -536,36 +653,12 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 | |||
536 | return true; | 653 | return true; |
537 | } | 654 | } |
538 | 655 | ||
539 | static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | 656 | static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, |
540 | struct drm_display_mode *mode, | 657 | struct drm_display_mode *mode) |
541 | struct drm_display_mode *adjusted_mode) | ||
542 | { | ||
543 | /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO | ||
544 | * device will be told of the multiplier during mode_set. | ||
545 | */ | ||
546 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
547 | return true; | ||
548 | } | ||
549 | |||
550 | static void intel_sdvo_mode_set(struct drm_encoder *encoder, | ||
551 | struct drm_display_mode *mode, | ||
552 | struct drm_display_mode *adjusted_mode) | ||
553 | { | 658 | { |
554 | struct drm_device *dev = encoder->dev; | 659 | uint16_t width, height; |
555 | struct drm_i915_private *dev_priv = dev->dev_private; | 660 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; |
556 | struct drm_crtc *crtc = encoder->crtc; | 661 | uint16_t h_sync_offset, v_sync_offset; |
557 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
558 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
559 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
560 | u16 width, height; | ||
561 | u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; | ||
562 | u16 h_sync_offset, v_sync_offset; | ||
563 | u32 sdvox; | ||
564 | struct intel_sdvo_dtd output_dtd; | ||
565 | int sdvo_pixel_multiply; | ||
566 | |||
567 | if (!mode) | ||
568 | return; | ||
569 | 662 | ||
570 | width = mode->crtc_hdisplay; | 663 | width = mode->crtc_hdisplay; |
571 | height = mode->crtc_vdisplay; | 664 | height = mode->crtc_vdisplay; |
@@ -580,93 +673,423 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
580 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; | 673 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; |
581 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; | 674 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; |
582 | 675 | ||
583 | output_dtd.part1.clock = mode->clock / 10; | 676 | dtd->part1.clock = mode->clock / 10; |
584 | output_dtd.part1.h_active = width & 0xff; | 677 | dtd->part1.h_active = width & 0xff; |
585 | output_dtd.part1.h_blank = h_blank_len & 0xff; | 678 | dtd->part1.h_blank = h_blank_len & 0xff; |
586 | output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | | 679 | dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | |
587 | ((h_blank_len >> 8) & 0xf); | 680 | ((h_blank_len >> 8) & 0xf); |
588 | output_dtd.part1.v_active = height & 0xff; | 681 | dtd->part1.v_active = height & 0xff; |
589 | output_dtd.part1.v_blank = v_blank_len & 0xff; | 682 | dtd->part1.v_blank = v_blank_len & 0xff; |
590 | output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | | 683 | dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | |
591 | ((v_blank_len >> 8) & 0xf); | 684 | ((v_blank_len >> 8) & 0xf); |
592 | 685 | ||
593 | output_dtd.part2.h_sync_off = h_sync_offset; | 686 | dtd->part2.h_sync_off = h_sync_offset; |
594 | output_dtd.part2.h_sync_width = h_sync_len & 0xff; | 687 | dtd->part2.h_sync_width = h_sync_len & 0xff; |
595 | output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | | 688 | dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | |
596 | (v_sync_len & 0xf); | 689 | (v_sync_len & 0xf); |
597 | output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | | 690 | dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | |
598 | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | | 691 | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | |
599 | ((v_sync_len & 0x30) >> 4); | 692 | ((v_sync_len & 0x30) >> 4); |
600 | 693 | ||
601 | output_dtd.part2.dtd_flags = 0x18; | 694 | dtd->part2.dtd_flags = 0x18; |
602 | if (mode->flags & DRM_MODE_FLAG_PHSYNC) | 695 | if (mode->flags & DRM_MODE_FLAG_PHSYNC) |
603 | output_dtd.part2.dtd_flags |= 0x2; | 696 | dtd->part2.dtd_flags |= 0x2; |
604 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) | 697 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) |
605 | output_dtd.part2.dtd_flags |= 0x4; | 698 | dtd->part2.dtd_flags |= 0x4; |
699 | |||
700 | dtd->part2.sdvo_flags = 0; | ||
701 | dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; | ||
702 | dtd->part2.reserved = 0; | ||
703 | } | ||
704 | |||
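The DTD packing above splits each 12-bit timing value into a full low byte plus a high nibble. A minimal standalone sketch of the same bit layout (illustrative only, not part of the patch; the demo_ names are made up):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Illustrative only: mirrors how the DTD splits a 12-bit width into a
 * low byte (h_active) and the high nibble of h_high. */
struct demo_dtd_part1 {
	uint8_t h_active;	/* width bits 7:0 */
	uint8_t h_blank;	/* blank length bits 7:0 */
	uint8_t h_high;		/* width bits 11:8 in the high nibble,
				 * blank bits 11:8 in the low nibble */
};

static void demo_pack(struct demo_dtd_part1 *p, uint16_t width, uint16_t blank)
{
	p->h_active = width & 0xff;
	p->h_blank = blank & 0xff;
	p->h_high = (((width >> 8) & 0xf) << 4) | ((blank >> 8) & 0xf);
}

static uint16_t demo_unpack_width(const struct demo_dtd_part1 *p)
{
	return p->h_active | (((p->h_high >> 4) & 0xf) << 8);
}

int main(void)
{
	struct demo_dtd_part1 p;

	demo_pack(&p, 1366, 426);
	assert(demo_unpack_width(&p) == 1366);
	printf("packed 1366 -> %02x %02x, unpacked %u\n",
	       p.h_active, p.h_high, demo_unpack_width(&p));
	return 0;
}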
705 | static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | ||
706 | struct intel_sdvo_dtd *dtd) | ||
707 | { | ||
708 | uint16_t width, height; | ||
709 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; | ||
710 | uint16_t h_sync_offset, v_sync_offset; | ||
711 | |||
712 | width = mode->crtc_hdisplay; | ||
713 | height = mode->crtc_vdisplay; | ||
714 | |||
715 | /* do some mode translations */ | ||
716 | h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; | ||
717 | h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; | ||
718 | |||
719 | v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; | ||
720 | v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
721 | |||
722 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; | ||
723 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; | ||
724 | |||
725 | mode->hdisplay = dtd->part1.h_active; | ||
726 | mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; | ||
727 | mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; | ||
728 | mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; | ||
729 | mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; | ||
730 | mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; | ||
731 | mode->htotal = mode->hdisplay + dtd->part1.h_blank; | ||
732 | mode->htotal += (dtd->part1.h_high & 0xf) << 8; | ||
733 | |||
734 | mode->vdisplay = dtd->part1.v_active; | ||
735 | mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; | ||
736 | mode->vsync_start = mode->vdisplay; | ||
737 | mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; | ||
738 | mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; | ||
739 | mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; | ||
740 | mode->vsync_end = mode->vsync_start + | ||
741 | (dtd->part2.v_sync_off_width & 0xf); | ||
742 | mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; | ||
743 | mode->vtotal = mode->vdisplay + dtd->part1.v_blank; | ||
744 | mode->vtotal += (dtd->part1.v_high & 0xf) << 8; | ||
745 | |||
746 | mode->clock = dtd->part1.clock * 10; | ||
747 | |||
748 | mode->flags &= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); | ||
749 | if (dtd->part2.dtd_flags & 0x2) | ||
750 | mode->flags |= DRM_MODE_FLAG_PHSYNC; | ||
751 | if (dtd->part2.dtd_flags & 0x4) | ||
752 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | ||
753 | } | ||
754 | |||
755 | static bool intel_sdvo_get_supp_encode(struct intel_output *output, | ||
756 | struct intel_sdvo_encode *encode) | ||
757 | { | ||
758 | uint8_t status; | ||
759 | |||
760 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); | ||
761 | status = intel_sdvo_read_response(output, encode, sizeof(*encode)); | ||
762 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ | ||
763 | memset(encode, 0, sizeof(*encode)); | ||
764 | return false; | ||
765 | } | ||
766 | |||
767 | return true; | ||
768 | } | ||
769 | |||
770 | static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) | ||
771 | { | ||
772 | uint8_t status; | ||
773 | |||
774 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); | ||
775 | status = intel_sdvo_read_response(output, NULL, 0); | ||
776 | |||
777 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
778 | } | ||
779 | |||
780 | static bool intel_sdvo_set_colorimetry(struct intel_output *output, | ||
781 | uint8_t mode) | ||
782 | { | ||
783 | uint8_t status; | ||
784 | |||
785 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); | ||
786 | status = intel_sdvo_read_response(output, NULL, 0); | ||
787 | |||
788 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
789 | } | ||
790 | |||
791 | #if 0 | ||
792 | static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | ||
793 | { | ||
794 | int i, j; | ||
795 | uint8_t set_buf_index[2]; | ||
796 | uint8_t av_split; | ||
797 | uint8_t buf_size; | ||
798 | uint8_t buf[48]; | ||
799 | uint8_t *pos; | ||
800 | |||
801 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); | ||
802 | intel_sdvo_read_response(output, &av_split, 1); | ||
803 | |||
804 | for (i = 0; i <= av_split; i++) { | ||
805 | set_buf_index[0] = i; set_buf_index[1] = 0; | ||
806 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, | ||
807 | set_buf_index, 2); | ||
808 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); | ||
809 | intel_sdvo_read_response(output, &buf_size, 1); | ||
810 | |||
811 | pos = buf; | ||
812 | for (j = 0; j <= buf_size; j += 8) { | ||
813 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, | ||
814 | NULL, 0); | ||
815 | intel_sdvo_read_response(output, pos, 8); | ||
816 | pos += 8; | ||
817 | } | ||
818 | } | ||
819 | } | ||
820 | #endif | ||
821 | |||
822 | static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, | ||
823 | uint8_t *data, int8_t size, uint8_t tx_rate) | ||
824 | { | ||
825 | uint8_t set_buf_index[2]; | ||
826 | |||
827 | set_buf_index[0] = index; | ||
828 | set_buf_index[1] = 0; | ||
829 | |||
830 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); | ||
831 | |||
832 | for (; size > 0; size -= 8) { | ||
833 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); | ||
834 | data += 8; | ||
835 | } | ||
836 | |||
837 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | ||
838 | } | ||
839 | |||
840 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | ||
841 | { | ||
842 | uint8_t csum = 0; | ||
843 | int i; | ||
844 | |||
845 | for (i = 0; i < size; i++) | ||
846 | csum += data[i]; | ||
847 | |||
848 | return 0x100 - csum; | ||
849 | } | ||
850 | |||
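intel_sdvo_calc_hbuf_csum() above produces the usual DIP infoframe checksum: the checksum byte is chosen so that the covered header and payload bytes sum to zero modulo 256. A small standalone sketch verifying that property (illustrative only; the demo_ names and the sample buffer are made up):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as intel_sdvo_calc_hbuf_csum(): the checksum byte is
 * picked so that all covered bytes add up to 0 modulo 256. */
static uint8_t demo_hbuf_csum(const uint8_t *data, uint8_t size)
{
	uint8_t csum = 0;
	int i;

	for (i = 0; i < size; i++)
		csum += data[i];
	return 0x100 - csum;
}

int main(void)
{
	/* Hypothetical AVI infoframe header (type, version, len, checksum
	 * slot) followed by 13 zeroed payload bytes. */
	uint8_t buf[4 + 13] = { 0x82, 0x02, 0x0d, 0x00 };
	uint8_t sum = 0;
	int i;

	buf[3] = demo_hbuf_csum(buf, sizeof(buf));
	for (i = 0; i < (int)sizeof(buf); i++)
		sum += buf[i];
	printf("checksum byte 0x%02x, total mod 256 = %u\n", buf[3], sum);
	return 0;
}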
851 | #define DIP_TYPE_AVI 0x82 | ||
852 | #define DIP_VERSION_AVI 0x2 | ||
853 | #define DIP_LEN_AVI 13 | ||
854 | |||
855 | struct dip_infoframe { | ||
856 | uint8_t type; | ||
857 | uint8_t version; | ||
858 | uint8_t len; | ||
859 | uint8_t checksum; | ||
860 | union { | ||
861 | struct { | ||
862 | /* Packet Byte #1 */ | ||
863 | uint8_t S:2; | ||
864 | uint8_t B:2; | ||
865 | uint8_t A:1; | ||
866 | uint8_t Y:2; | ||
867 | uint8_t rsvd1:1; | ||
868 | /* Packet Byte #2 */ | ||
869 | uint8_t R:4; | ||
870 | uint8_t M:2; | ||
871 | uint8_t C:2; | ||
872 | /* Packet Byte #3 */ | ||
873 | uint8_t SC:2; | ||
874 | uint8_t Q:2; | ||
875 | uint8_t EC:3; | ||
876 | uint8_t ITC:1; | ||
877 | /* Packet Byte #4 */ | ||
878 | uint8_t VIC:7; | ||
879 | uint8_t rsvd2:1; | ||
880 | /* Packet Byte #5 */ | ||
881 | uint8_t PR:4; | ||
882 | uint8_t rsvd3:4; | ||
883 | /* Packet Byte #6~13 */ | ||
884 | uint16_t top_bar_end; | ||
885 | uint16_t bottom_bar_start; | ||
886 | uint16_t left_bar_end; | ||
887 | uint16_t right_bar_start; | ||
888 | } avi; | ||
889 | struct { | ||
890 | /* Packet Byte #1 */ | ||
891 | uint8_t channel_count:3; | ||
892 | uint8_t rsvd1:1; | ||
893 | uint8_t coding_type:4; | ||
894 | /* Packet Byte #2 */ | ||
895 | uint8_t sample_size:2; /* SS0, SS1 */ | ||
896 | uint8_t sample_frequency:3; | ||
897 | uint8_t rsvd2:3; | ||
898 | /* Packet Byte #3 */ | ||
899 | uint8_t coding_type_private:5; | ||
900 | uint8_t rsvd3:3; | ||
901 | /* Packet Byte #4 */ | ||
902 | uint8_t channel_allocation; | ||
903 | /* Packet Byte #5 */ | ||
904 | uint8_t rsvd4:3; | ||
905 | uint8_t level_shift:4; | ||
906 | uint8_t downmix_inhibit:1; | ||
907 | } audio; | ||
908 | uint8_t payload[28]; | ||
909 | } __attribute__ ((packed)) u; | ||
910 | } __attribute__((packed)); | ||
911 | |||
912 | static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | ||
913 | struct drm_display_mode * mode) | ||
914 | { | ||
915 | struct dip_infoframe avi_if = { | ||
916 | .type = DIP_TYPE_AVI, | ||
917 | .version = DIP_VERSION_AVI, | ||
918 | .len = DIP_LEN_AVI, | ||
919 | }; | ||
920 | |||
921 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | ||
922 | 4 + avi_if.len); | ||
923 | intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, | ||
924 | SDVO_HBUF_TX_VSYNC); | ||
925 | } | ||
926 | |||
927 | static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | ||
928 | struct drm_display_mode *mode, | ||
929 | struct drm_display_mode *adjusted_mode) | ||
930 | { | ||
931 | struct intel_output *output = enc_to_intel_output(encoder); | ||
932 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | ||
606 | 933 | ||
607 | output_dtd.part2.sdvo_flags = 0; | 934 | if (!dev_priv->is_tv) { |
608 | output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; | 935 | /* Make the CRTC code factor in the SDVO pixel multiplier. The |
609 | output_dtd.part2.reserved = 0; | 936 | * SDVO device will be told of the multiplier during mode_set. |
937 | */ | ||
938 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
939 | } else { | ||
940 | struct intel_sdvo_dtd output_dtd; | ||
941 | bool success; | ||
942 | |||
943 | /* We need to construct preferred input timings based on our | ||
944 | * output timings. To do that, we have to set the output | ||
945 | * timings, even though this isn't really the right place in | ||
946 | * the sequence to do it. Oh well. | ||
947 | */ | ||
948 | |||
949 | |||
950 | /* Set output timings */ | ||
951 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | ||
952 | intel_sdvo_set_target_output(output, | ||
953 | dev_priv->controlled_output); | ||
954 | intel_sdvo_set_output_timing(output, &output_dtd); | ||
955 | |||
956 | /* Set the input timing to the screen. Assume always input 0. */ | ||
957 | intel_sdvo_set_target_input(output, true, false); | ||
958 | |||
959 | |||
960 | success = intel_sdvo_create_preferred_input_timing(output, | ||
961 | mode->clock / 10, | ||
962 | mode->hdisplay, | ||
963 | mode->vdisplay); | ||
964 | if (success) { | ||
965 | struct intel_sdvo_dtd input_dtd; | ||
610 | 966 | ||
611 | /* Set the output timing to the screen */ | 967 | intel_sdvo_get_preferred_input_timing(output, |
612 | intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs); | 968 | &input_dtd); |
613 | intel_sdvo_set_output_timing(intel_output, &output_dtd); | 969 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
970 | |||
971 | } else { | ||
972 | return false; | ||
973 | } | ||
974 | } | ||
975 | return true; | ||
976 | } | ||
977 | |||
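In the non-TV path of intel_sdvo_mode_fixup() above, the adjusted clock is scaled by the SDVO pixel multiplier so the link runs fast enough for low dot clocks. A standalone sketch of the idea (illustrative only; the 100/50 MHz thresholds are assumptions made for the example, not taken from the patch):

#include <stdio.h>

/* Sketch of the pixel-multiplier idea: SDVO wants its link clock in a
 * fixed range, so low dot clocks are sent at 2x or 4x and the device
 * discards the repeated pixels.  Thresholds below are assumed. */
static int demo_pixel_multiplier(int clock_khz)
{
	if (clock_khz >= 100000)
		return 1;
	else if (clock_khz >= 50000)
		return 2;
	return 4;
}

int main(void)
{
	int clock = 31500;	/* a low dot clock, in kHz */

	printf("adjusted clock = %d kHz (x%d)\n",
	       clock * demo_pixel_multiplier(clock),
	       demo_pixel_multiplier(clock));
	return 0;
}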
978 | static void intel_sdvo_mode_set(struct drm_encoder *encoder, | ||
979 | struct drm_display_mode *mode, | ||
980 | struct drm_display_mode *adjusted_mode) | ||
981 | { | ||
982 | struct drm_device *dev = encoder->dev; | ||
983 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
984 | struct drm_crtc *crtc = encoder->crtc; | ||
985 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
986 | struct intel_output *output = enc_to_intel_output(encoder); | ||
987 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
988 | u32 sdvox = 0; | ||
989 | int sdvo_pixel_multiply; | ||
990 | struct intel_sdvo_in_out_map in_out; | ||
991 | struct intel_sdvo_dtd input_dtd; | ||
992 | u8 status; | ||
993 | |||
994 | if (!mode) | ||
995 | return; | ||
996 | |||
997 | /* First, set the input mapping for the first input to our controlled | ||
998 | * output. This is only correct if we're a single-input device, in | ||
999 | * which case the first input is the output from the appropriate SDVO | ||
1000 | * channel on the motherboard. In a two-input device, the first input | ||
1001 | * will be SDVOB and the second SDVOC. | ||
1002 | */ | ||
1003 | in_out.in0 = sdvo_priv->controlled_output; | ||
1004 | in_out.in1 = 0; | ||
1005 | |||
1006 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, | ||
1007 | &in_out, sizeof(in_out)); | ||
1008 | status = intel_sdvo_read_response(output, NULL, 0); | ||
1009 | |||
1010 | if (sdvo_priv->is_hdmi) { | ||
1011 | intel_sdvo_set_avi_infoframe(output, mode); | ||
1012 | sdvox |= SDVO_AUDIO_ENABLE; | ||
1013 | } | ||
1014 | |||
1015 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); | ||
1016 | |||
1017 | /* If it's a TV, we already set the output timing in mode_fixup. | ||
1018 | * Otherwise, the output timing is equal to the input timing. | ||
1019 | */ | ||
1020 | if (!sdvo_priv->is_tv) { | ||
1021 | /* Set the output timing to the screen */ | ||
1022 | intel_sdvo_set_target_output(output, | ||
1023 | sdvo_priv->controlled_output); | ||
1024 | intel_sdvo_set_output_timing(output, &input_dtd); | ||
1025 | } | ||
614 | 1026 | ||
615 | /* Set the input timing to the screen. Assume always input 0. */ | 1027 | /* Set the input timing to the screen. Assume always input 0. */ |
616 | intel_sdvo_set_target_input(intel_output, true, false); | 1028 | intel_sdvo_set_target_input(output, true, false); |
617 | 1029 | ||
618 | /* We would like to use i830_sdvo_create_preferred_input_timing() to | 1030 | /* We would like to use intel_sdvo_create_preferred_input_timing() to |
619 | * provide the device with a timing it can support, if it supports that | 1031 | * provide the device with a timing it can support, if it supports that |
620 | * feature. However, presumably we would need to adjust the CRTC to | 1032 | * feature. However, presumably we would need to adjust the CRTC to |
621 | * output the preferred timing, and we don't support that currently. | 1033 | * output the preferred timing, and we don't support that currently. |
622 | */ | 1034 | */ |
623 | intel_sdvo_set_input_timing(intel_output, &output_dtd); | 1035 | #if 0 |
1036 | success = intel_sdvo_create_preferred_input_timing(output, clock, | ||
1037 | width, height); | ||
1038 | if (success) { | ||
1039 | struct intel_sdvo_dtd *input_dtd; | ||
1040 | |||
1041 | intel_sdvo_get_preferred_input_timing(output, &input_dtd); | ||
1042 | intel_sdvo_set_input_timing(output, &input_dtd); | ||
1043 | } | ||
1044 | #else | ||
1045 | intel_sdvo_set_input_timing(output, &input_dtd); | ||
1046 | #endif | ||
624 | 1047 | ||
625 | switch (intel_sdvo_get_pixel_multiplier(mode)) { | 1048 | switch (intel_sdvo_get_pixel_multiplier(mode)) { |
626 | case 1: | 1049 | case 1: |
627 | intel_sdvo_set_clock_rate_mult(intel_output, | 1050 | intel_sdvo_set_clock_rate_mult(output, |
628 | SDVO_CLOCK_RATE_MULT_1X); | 1051 | SDVO_CLOCK_RATE_MULT_1X); |
629 | break; | 1052 | break; |
630 | case 2: | 1053 | case 2: |
631 | intel_sdvo_set_clock_rate_mult(intel_output, | 1054 | intel_sdvo_set_clock_rate_mult(output, |
632 | SDVO_CLOCK_RATE_MULT_2X); | 1055 | SDVO_CLOCK_RATE_MULT_2X); |
633 | break; | 1056 | break; |
634 | case 4: | 1057 | case 4: |
635 | intel_sdvo_set_clock_rate_mult(intel_output, | 1058 | intel_sdvo_set_clock_rate_mult(output, |
636 | SDVO_CLOCK_RATE_MULT_4X); | 1059 | SDVO_CLOCK_RATE_MULT_4X); |
637 | break; | 1060 | break; |
638 | } | 1061 | } |
639 | 1062 | ||
640 | /* Set the SDVO control regs. */ | 1063 | /* Set the SDVO control regs. */ |
641 | if (0/*IS_I965GM(dev)*/) { | 1064 | if (IS_I965G(dev)) { |
642 | sdvox = SDVO_BORDER_ENABLE; | 1065 | sdvox |= SDVO_BORDER_ENABLE | |
643 | } else { | 1066 | SDVO_VSYNC_ACTIVE_HIGH | |
644 | sdvox = I915_READ(sdvo_priv->output_device); | 1067 | SDVO_HSYNC_ACTIVE_HIGH; |
645 | switch (sdvo_priv->output_device) { | 1068 | } else { |
646 | case SDVOB: | 1069 | sdvox |= I915_READ(sdvo_priv->output_device); |
647 | sdvox &= SDVOB_PRESERVE_MASK; | 1070 | switch (sdvo_priv->output_device) { |
648 | break; | 1071 | case SDVOB: |
649 | case SDVOC: | 1072 | sdvox &= SDVOB_PRESERVE_MASK; |
650 | sdvox &= SDVOC_PRESERVE_MASK; | 1073 | break; |
651 | break; | 1074 | case SDVOC: |
652 | } | 1075 | sdvox &= SDVOC_PRESERVE_MASK; |
653 | sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; | 1076 | break; |
654 | } | 1077 | } |
1078 | sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; | ||
1079 | } | ||
655 | if (intel_crtc->pipe == 1) | 1080 | if (intel_crtc->pipe == 1) |
656 | sdvox |= SDVO_PIPE_B_SELECT; | 1081 | sdvox |= SDVO_PIPE_B_SELECT; |
657 | 1082 | ||
658 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); | 1083 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); |
659 | if (IS_I965G(dev)) { | 1084 | if (IS_I965G(dev)) { |
660 | /* done in crtc_mode_set as the dpll_md reg must be written | 1085 | /* done in crtc_mode_set as the dpll_md reg must be written early */ |
661 | early */ | 1086 | } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { |
662 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | 1087 | /* done in crtc_mode_set as it lives inside the dpll register */ |
663 | /* done in crtc_mode_set as it lives inside the | ||
664 | dpll register */ | ||
665 | } else { | 1088 | } else { |
666 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; | 1089 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; |
667 | } | 1090 | } |
668 | 1091 | ||
669 | intel_sdvo_write_sdvox(intel_output, sdvox); | 1092 | intel_sdvo_write_sdvox(output, sdvox); |
670 | } | 1093 | } |
671 | 1094 | ||
672 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | 1095 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) |
@@ -714,7 +1137,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
714 | 1137 | ||
715 | if (0) | 1138 | if (0) |
716 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1139 | intel_sdvo_set_encoder_power_state(intel_output, mode); |
717 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs); | 1140 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); |
718 | } | 1141 | } |
719 | return; | 1142 | return; |
720 | } | 1143 | } |
@@ -752,6 +1175,9 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
752 | &sdvo_priv->save_output_dtd[o]); | 1175 | &sdvo_priv->save_output_dtd[o]); |
753 | } | 1176 | } |
754 | } | 1177 | } |
1178 | if (sdvo_priv->is_tv) { | ||
1179 | /* XXX: Save TV format/enhancements. */ | ||
1180 | } | ||
755 | 1181 | ||
756 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); | 1182 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); |
757 | } | 1183 | } |
@@ -759,7 +1185,6 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
759 | static void intel_sdvo_restore(struct drm_connector *connector) | 1185 | static void intel_sdvo_restore(struct drm_connector *connector) |
760 | { | 1186 | { |
761 | struct drm_device *dev = connector->dev; | 1187 | struct drm_device *dev = connector->dev; |
762 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
763 | struct intel_output *intel_output = to_intel_output(connector); | 1188 | struct intel_output *intel_output = to_intel_output(connector); |
764 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1189 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; |
765 | int o; | 1190 | int o; |
@@ -790,7 +1215,11 @@ static void intel_sdvo_restore(struct drm_connector *connector) | |||
790 | 1215 | ||
791 | intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); | 1216 | intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); |
792 | 1217 | ||
793 | I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); | 1218 | if (sdvo_priv->is_tv) { |
1219 | /* XXX: Restore TV format/enhancements. */ | ||
1220 | } | ||
1221 | |||
1222 | intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); | ||
794 | 1223 | ||
795 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) | 1224 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) |
796 | { | 1225 | { |
@@ -916,20 +1345,173 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
916 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1345 | status = intel_sdvo_read_response(intel_output, &response, 2); |
917 | 1346 | ||
918 | DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); | 1347 | DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); |
1348 | |||
1349 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1350 | return connector_status_unknown; | ||
1351 | |||
919 | if ((response[0] != 0) || (response[1] != 0)) | 1352 | if ((response[0] != 0) || (response[1] != 0)) |
920 | return connector_status_connected; | 1353 | return connector_status_connected; |
921 | else | 1354 | else |
922 | return connector_status_disconnected; | 1355 | return connector_status_disconnected; |
923 | } | 1356 | } |
924 | 1357 | ||
925 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1358 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
926 | { | 1359 | { |
927 | struct intel_output *intel_output = to_intel_output(connector); | 1360 | struct intel_output *intel_output = to_intel_output(connector); |
1361 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
928 | 1362 | ||
929 | /* set the bus switch and get the modes */ | 1363 | /* set the bus switch and get the modes */ |
930 | intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2); | 1364 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); |
931 | intel_ddc_get_modes(intel_output); | 1365 | intel_ddc_get_modes(intel_output); |
932 | 1366 | ||
1367 | #if 0 | ||
1368 | struct drm_device *dev = encoder->dev; | ||
1369 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1370 | /* Mac mini hack. On this device, I get DDC through the analog, which | ||
1371 | * load-detects as disconnected. I fail to DDC through the SDVO DDC, | ||
1372 | * but it does load-detect as connected. So, just steal the DDC bits | ||
1373 | * from analog when we fail at finding it the right way. | ||
1374 | */ | ||
1375 | crt = xf86_config->output[0]; | ||
1376 | intel_output = crt->driver_private; | ||
1377 | if (intel_output->type == I830_OUTPUT_ANALOG && | ||
1378 | crt->funcs->detect(crt) == XF86OutputStatusDisconnected) { | ||
1379 | I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A"); | ||
1380 | edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus); | ||
1381 | xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true); | ||
1382 | } | ||
1383 | if (edid_mon) { | ||
1384 | xf86OutputSetEDID(output, edid_mon); | ||
1385 | modes = xf86OutputGetEDIDModes(output); | ||
1386 | } | ||
1387 | #endif | ||
1388 | } | ||
1389 | |||
1390 | /** | ||
1391 | * This function checks the current TV format, and chooses a default if | ||
1392 | * it hasn't been set. | ||
1393 | */ | ||
1394 | static void | ||
1395 | intel_sdvo_check_tv_format(struct intel_output *output) | ||
1396 | { | ||
1397 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | ||
1398 | struct intel_sdvo_tv_format format, unset; | ||
1399 | uint8_t status; | ||
1400 | |||
1401 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0); | ||
1402 | status = intel_sdvo_read_response(output, &format, sizeof(format)); | ||
1403 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1404 | return; | ||
1405 | |||
1406 | memset(&unset, 0, sizeof(unset)); | ||
1407 | if (memcmp(&format, &unset, sizeof(format)) == 0) { | ||
1408 | DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n", | ||
1409 | SDVO_NAME(dev_priv)); | ||
1410 | |||
1411 | format.ntsc_m = true; | ||
1412 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format)); | ||
1413 | status = intel_sdvo_read_response(output, NULL, 0); | ||
1414 | } | ||
1415 | } | ||
1416 | |||
1417 | /* | ||
1418 | * Set of SDVO TV modes. | ||
1419 | * Note! This is in reply order (see loop in get_tv_modes). | ||
1420 | * XXX: all 60Hz refresh? | ||
1421 | */ | ||
1422 | struct drm_display_mode sdvo_tv_modes[] = { | ||
1423 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815680, 321, 384, 416, | ||
1424 | 200, 0, 232, 201, 233, 4196112, 0, | ||
1425 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1426 | { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814080, 321, 384, 416, | ||
1427 | 240, 0, 272, 241, 273, 4196112, 0, | ||
1428 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1429 | { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910080, 401, 464, 496, | ||
1430 | 300, 0, 332, 301, 333, 4196112, 0, | ||
1431 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1432 | { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913280, 641, 704, 736, | ||
1433 | 350, 0, 382, 351, 383, 4196112, 0, | ||
1434 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1435 | { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736, | ||
1436 | 400, 0, 432, 401, 433, 4196112, 0, | ||
1437 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1438 | { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736, | ||
1439 | 400, 0, 432, 401, 433, 4196112, 0, | ||
1440 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1441 | { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624000, 705, 768, 800, | ||
1442 | 480, 0, 512, 481, 513, 4196112, 0, | ||
1443 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1444 | { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232000, 705, 768, 800, | ||
1445 | 576, 0, 608, 577, 609, 4196112, 0, | ||
1446 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1447 | { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751680, 721, 784, 816, | ||
1448 | 350, 0, 382, 351, 383, 4196112, 0, | ||
1449 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1450 | { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199680, 721, 784, 816, | ||
1451 | 400, 0, 432, 401, 433, 4196112, 0, | ||
1452 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1453 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116480, 721, 784, 816, | ||
1454 | 480, 0, 512, 481, 513, 4196112, 0, | ||
1455 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1456 | { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054080, 721, 784, 816, | ||
1457 | 540, 0, 572, 541, 573, 4196112, 0, | ||
1458 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1459 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816640, 721, 784, 816, | ||
1460 | 576, 0, 608, 577, 609, 4196112, 0, | ||
1461 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1462 | { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570560, 769, 832, 864, | ||
1463 | 576, 0, 608, 577, 609, 4196112, 0, | ||
1464 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1465 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030080, 801, 864, 896, | ||
1466 | 600, 0, 632, 601, 633, 4196112, 0, | ||
1467 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1468 | { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581760, 833, 896, 928, | ||
1469 | 624, 0, 656, 625, 657, 4196112, 0, | ||
1470 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1471 | { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707040, 921, 984, 1016, | ||
1472 | 766, 0, 798, 767, 799, 4196112, 0, | ||
1473 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1474 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827200, 1025, 1088, 1120, | ||
1475 | 768, 0, 800, 769, 801, 4196112, 0, | ||
1476 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1477 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265920, 1281, 1344, 1376, | ||
1478 | 1024, 0, 1056, 1025, 1057, 4196112, 0, | ||
1479 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1480 | }; | ||
1481 | |||
1482 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | ||
1483 | { | ||
1484 | struct intel_output *output = to_intel_output(connector); | ||
1485 | uint32_t reply = 0; | ||
1486 | uint8_t status; | ||
1487 | int i = 0; | ||
1488 | |||
1489 | intel_sdvo_check_tv_format(output); | ||
1490 | |||
1491 | /* Read the list of supported input resolutions for the selected TV | ||
1492 | * format. | ||
1493 | */ | ||
1494 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, | ||
1495 | NULL, 0); | ||
1496 | status = intel_sdvo_read_response(output, &reply, 3); | ||
1497 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1498 | return; | ||
1499 | |||
1500 | for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) | ||
1501 | if (reply & (1 << i)) | ||
1502 | drm_mode_probed_add(connector, &sdvo_tv_modes[i]); | ||
1503 | } | ||
1504 | |||
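intel_sdvo_get_tv_modes() above treats the 3-byte resolution-support reply as a bitmask whose bit positions follow the order of sdvo_tv_modes[]. A standalone sketch of that bit-to-entry mapping (illustrative only; the name list and the sample reply value are made up):

#include <stdint.h>
#include <stdio.h>

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for the first few sdvo_tv_modes[] entries, in reply order. */
static const char *demo_tv_mode_names[] = {
	"320x200", "320x240", "400x300", "640x350", "640x400",
};

int main(void)
{
	/* Hypothetical reply: bits 1 and 3 set means the device supports
	 * 320x240 and 640x350 for the current TV format. */
	uint32_t reply = (1 << 1) | (1 << 3);
	unsigned int i;

	for (i = 0; i < DEMO_ARRAY_SIZE(demo_tv_mode_names); i++)
		if (reply & (1u << i))
			printf("probed mode: %s\n", demo_tv_mode_names[i]);
	return 0;
}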
1505 | static int intel_sdvo_get_modes(struct drm_connector *connector) | ||
1506 | { | ||
1507 | struct intel_output *output = to_intel_output(connector); | ||
1508 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
1509 | |||
1510 | if (sdvo_priv->is_tv) | ||
1511 | intel_sdvo_get_tv_modes(connector); | ||
1512 | else | ||
1513 | intel_sdvo_get_ddc_modes(connector); | ||
1514 | |||
933 | if (list_empty(&connector->probed_modes)) | 1515 | if (list_empty(&connector->probed_modes)) |
934 | return 0; | 1516 | return 0; |
935 | return 1; | 1517 | return 1; |
@@ -978,6 +1560,65 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | |||
978 | }; | 1560 | }; |
979 | 1561 | ||
980 | 1562 | ||
1563 | /** | ||
1564 | * Choose the appropriate DDC bus for control bus switch command for this | ||
1565 | * SDVO output based on the controlled output. | ||
1566 | * | ||
1567 | * DDC bus number assignment is in a priority order of RGB outputs, then TMDS | ||
1568 | * outputs, then LVDS outputs. | ||
1569 | */ | ||
1570 | static void | ||
1571 | intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) | ||
1572 | { | ||
1573 | uint16_t mask = 0; | ||
1574 | unsigned int num_bits; | ||
1575 | |||
1576 | /* Make a mask of outputs less than or equal to our own priority in the | ||
1577 | * list. | ||
1578 | */ | ||
1579 | switch (dev_priv->controlled_output) { | ||
1580 | case SDVO_OUTPUT_LVDS1: | ||
1581 | mask |= SDVO_OUTPUT_LVDS1; | ||
1582 | case SDVO_OUTPUT_LVDS0: | ||
1583 | mask |= SDVO_OUTPUT_LVDS0; | ||
1584 | case SDVO_OUTPUT_TMDS1: | ||
1585 | mask |= SDVO_OUTPUT_TMDS1; | ||
1586 | case SDVO_OUTPUT_TMDS0: | ||
1587 | mask |= SDVO_OUTPUT_TMDS0; | ||
1588 | case SDVO_OUTPUT_RGB1: | ||
1589 | mask |= SDVO_OUTPUT_RGB1; | ||
1590 | case SDVO_OUTPUT_RGB0: | ||
1591 | mask |= SDVO_OUTPUT_RGB0; | ||
1592 | break; | ||
1593 | } | ||
1594 | |||
1595 | /* Count bits to find what number we are in the priority list. */ | ||
1596 | mask &= dev_priv->caps.output_flags; | ||
1597 | num_bits = hweight16(mask); | ||
1598 | if (num_bits > 3) { | ||
1599 | /* if more than 3 outputs, default to DDC bus 3 for now */ | ||
1600 | num_bits = 3; | ||
1601 | } | ||
1602 | |||
1603 | /* Corresponds to SDVO_CONTROL_BUS_DDCx */ | ||
1604 | dev_priv->ddc_bus = 1 << num_bits; | ||
1605 | } | ||
1606 | |||
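intel_sdvo_select_ddc_bus() above picks a DDC bus by masking off the outputs at or below the controlled output's priority, counting how many of those the device actually exposes, and using that count as the bus index. A standalone sketch of the same idea (illustrative only; the DEMO_OUT_* values and demo_ helpers are made up and do not match the real SDVO_OUTPUT_* flags):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values for the sketch. */
#define DEMO_OUT_TMDS0 (1 << 0)
#define DEMO_OUT_RGB0  (1 << 1)
#define DEMO_OUT_LVDS0 (1 << 2)

static unsigned int demo_hweight16(uint16_t v)
{
	unsigned int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

/* Same idea as intel_sdvo_select_ddc_bus(): build a mask of the outputs
 * at or below our priority (RGB, then TMDS, then LVDS), count how many
 * the device exposes, cap at 3, and use the count as the DDC bus bit. */
static uint8_t demo_select_ddc_bus(uint16_t controlled, uint16_t caps)
{
	uint16_t mask = 0;
	unsigned int num_bits;

	if (controlled == DEMO_OUT_LVDS0)
		mask |= DEMO_OUT_LVDS0 | DEMO_OUT_TMDS0 | DEMO_OUT_RGB0;
	else if (controlled == DEMO_OUT_TMDS0)
		mask |= DEMO_OUT_TMDS0 | DEMO_OUT_RGB0;
	else
		mask |= DEMO_OUT_RGB0;

	num_bits = demo_hweight16(mask & caps);
	if (num_bits > 3)
		num_bits = 3;
	return 1 << num_bits;
}

int main(void)
{
	uint16_t caps = DEMO_OUT_RGB0 | DEMO_OUT_TMDS0;

	/* TMDS0 sits after RGB0 in the priority order, so it gets 1 << 2. */
	printf("ddc_bus = 0x%02x\n", demo_select_ddc_bus(DEMO_OUT_TMDS0, caps));
	return 0;
}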
1607 | static bool | ||
1608 | intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | ||
1609 | { | ||
1610 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
1611 | uint8_t status; | ||
1612 | |||
1613 | intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); | ||
1614 | |||
1615 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); | ||
1616 | status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); | ||
1617 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1618 | return false; | ||
1619 | return true; | ||
1620 | } | ||
1621 | |||
981 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 1622 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
982 | { | 1623 | { |
983 | struct drm_connector *connector; | 1624 | struct drm_connector *connector; |
@@ -1040,45 +1681,76 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1040 | 1681 | ||
1041 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 1682 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); |
1042 | 1683 | ||
1043 | memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs)); | 1684 | if (sdvo_priv->caps.output_flags & |
1685 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | ||
1686 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | ||
1687 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | ||
1688 | else | ||
1689 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | ||
1690 | |||
1691 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1692 | encoder_type = DRM_MODE_ENCODER_TMDS; | ||
1693 | connector_type = DRM_MODE_CONNECTOR_DVID; | ||
1044 | 1694 | ||
1045 | /* TODO, CVBS, SVID, YPRPB & SCART outputs. */ | 1695 | if (intel_sdvo_get_supp_encode(intel_output, |
1046 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) | 1696 | &sdvo_priv->encode) && |
1697 | intel_sdvo_get_digital_encoding_mode(intel_output) && | ||
1698 | sdvo_priv->is_hdmi) { | ||
1699 | /* enable hdmi encoding mode if supported */ | ||
1700 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | ||
1701 | intel_sdvo_set_colorimetry(intel_output, | ||
1702 | SDVO_COLORIMETRY_RGB256); | ||
1703 | connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
1704 | } | ||
1705 | } | ||
1706 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) | ||
1047 | { | 1707 | { |
1048 | sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; | 1708 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; |
1709 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1710 | encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
1711 | connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1712 | sdvo_priv->is_tv = true; | ||
1713 | intel_output->needs_tv_clock = true; | ||
1714 | } | ||
1715 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) | ||
1716 | { | ||
1717 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
1049 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1718 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1050 | encoder_type = DRM_MODE_ENCODER_DAC; | 1719 | encoder_type = DRM_MODE_ENCODER_DAC; |
1051 | connector_type = DRM_MODE_CONNECTOR_VGA; | 1720 | connector_type = DRM_MODE_CONNECTOR_VGA; |
1052 | } | 1721 | } |
1053 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) | 1722 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) |
1054 | { | 1723 | { |
1055 | sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; | 1724 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
1056 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1725 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1057 | encoder_type = DRM_MODE_ENCODER_DAC; | 1726 | encoder_type = DRM_MODE_ENCODER_DAC; |
1058 | connector_type = DRM_MODE_CONNECTOR_VGA; | 1727 | connector_type = DRM_MODE_CONNECTOR_VGA; |
1059 | } | 1728 | } |
1060 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | 1729 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) |
1061 | { | 1730 | { |
1062 | sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; | 1731 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
1063 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1732 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1064 | encoder_type = DRM_MODE_ENCODER_TMDS; | 1733 | encoder_type = DRM_MODE_ENCODER_LVDS; |
1065 | connector_type = DRM_MODE_CONNECTOR_DVID; | 1734 | connector_type = DRM_MODE_CONNECTOR_LVDS; |
1066 | } | 1735 | } |
1067 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) | 1736 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) |
1068 | { | 1737 | { |
1069 | sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; | 1738 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; |
1070 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1739 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1071 | encoder_type = DRM_MODE_ENCODER_TMDS; | 1740 | encoder_type = DRM_MODE_ENCODER_LVDS; |
1072 | connector_type = DRM_MODE_CONNECTOR_DVID; | 1741 | connector_type = DRM_MODE_CONNECTOR_LVDS; |
1073 | } | 1742 | } |
1074 | else | 1743 | else |
1075 | { | 1744 | { |
1076 | unsigned char bytes[2]; | 1745 | unsigned char bytes[2]; |
1077 | 1746 | ||
1747 | sdvo_priv->controlled_output = 0; | ||
1078 | memcpy (bytes, &sdvo_priv->caps.output_flags, 2); | 1748 | memcpy (bytes, &sdvo_priv->caps.output_flags, 2); |
1079 | DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", | 1749 | DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n", |
1080 | SDVO_NAME(sdvo_priv), | 1750 | SDVO_NAME(sdvo_priv), |
1081 | bytes[0], bytes[1]); | 1751 | bytes[0], bytes[1]); |
1752 | encoder_type = DRM_MODE_ENCODER_NONE; | ||
1753 | connector_type = DRM_MODE_CONNECTOR_Unknown; | ||
1082 | goto err_i2c; | 1754 | goto err_i2c; |
1083 | } | 1755 | } |
1084 | 1756 | ||
@@ -1089,6 +1761,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1089 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1761 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
1090 | drm_sysfs_connector_add(connector); | 1762 | drm_sysfs_connector_add(connector); |
1091 | 1763 | ||
1764 | intel_sdvo_select_ddc_bus(sdvo_priv); | ||
1765 | |||
1092 | /* Set the input timing to the screen. Assume always input 0. */ | 1766 | /* Set the input timing to the screen. Assume always input 0. */ |
1093 | intel_sdvo_set_target_input(intel_output, true, false); | 1767 | intel_sdvo_set_target_input(intel_output, true, false); |
1094 | 1768 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 861a43f8693c..1117b9c151a6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
@@ -173,6 +173,9 @@ struct intel_sdvo_get_trained_inputs_response { | |||
173 | * Returns two struct intel_sdvo_output_flags structures. | 173 | * Returns two struct intel_sdvo_output_flags structures. |
174 | */ | 174 | */ |
175 | #define SDVO_CMD_GET_IN_OUT_MAP 0x06 | 175 | #define SDVO_CMD_GET_IN_OUT_MAP 0x06 |
176 | struct intel_sdvo_in_out_map { | ||
177 | u16 in0, in1; | ||
178 | }; | ||
176 | 179 | ||
177 | /** | 180 | /** |
178 | * Sets the current mapping of SDVO inputs to outputs on the device. | 181 | * Sets the current mapping of SDVO inputs to outputs on the device. |
@@ -206,7 +209,8 @@ struct intel_sdvo_get_trained_inputs_response { | |||
206 | struct intel_sdvo_get_interrupt_event_source_response { | 209 | struct intel_sdvo_get_interrupt_event_source_response { |
207 | u16 interrupt_status; | 210 | u16 interrupt_status; |
208 | unsigned int ambient_light_interrupt:1; | 211 | unsigned int ambient_light_interrupt:1; |
209 | unsigned int pad:7; | 212 | unsigned int hdmi_audio_encrypt_change:1; |
213 | unsigned int pad:6; | ||
210 | } __attribute__((packed)); | 214 | } __attribute__((packed)); |
211 | 215 | ||
212 | /** | 216 | /** |
@@ -305,23 +309,411 @@ struct intel_sdvo_set_target_input_args { | |||
305 | # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) | 309 | # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) |
306 | 310 | ||
307 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 | 311 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 |
312 | /** 5 bytes of bit flags for TV formats shared by all TV format functions */ | ||
313 | struct intel_sdvo_tv_format { | ||
314 | unsigned int ntsc_m:1; | ||
315 | unsigned int ntsc_j:1; | ||
316 | unsigned int ntsc_443:1; | ||
317 | unsigned int pal_b:1; | ||
318 | unsigned int pal_d:1; | ||
319 | unsigned int pal_g:1; | ||
320 | unsigned int pal_h:1; | ||
321 | unsigned int pal_i:1; | ||
322 | |||
323 | unsigned int pal_m:1; | ||
324 | unsigned int pal_n:1; | ||
325 | unsigned int pal_nc:1; | ||
326 | unsigned int pal_60:1; | ||
327 | unsigned int secam_b:1; | ||
328 | unsigned int secam_d:1; | ||
329 | unsigned int secam_g:1; | ||
330 | unsigned int secam_k:1; | ||
331 | |||
332 | unsigned int secam_k1:1; | ||
333 | unsigned int secam_l:1; | ||
334 | unsigned int secam_60:1; | ||
335 | unsigned int hdtv_std_smpte_240m_1080i_59:1; | ||
336 | unsigned int hdtv_std_smpte_240m_1080i_60:1; | ||
337 | unsigned int hdtv_std_smpte_260m_1080i_59:1; | ||
338 | unsigned int hdtv_std_smpte_260m_1080i_60:1; | ||
339 | unsigned int hdtv_std_smpte_274m_1080i_50:1; | ||
340 | |||
341 | unsigned int hdtv_std_smpte_274m_1080i_59:1; | ||
342 | unsigned int hdtv_std_smpte_274m_1080i_60:1; | ||
343 | unsigned int hdtv_std_smpte_274m_1080p_23:1; | ||
344 | unsigned int hdtv_std_smpte_274m_1080p_24:1; | ||
345 | unsigned int hdtv_std_smpte_274m_1080p_25:1; | ||
346 | unsigned int hdtv_std_smpte_274m_1080p_29:1; | ||
347 | unsigned int hdtv_std_smpte_274m_1080p_30:1; | ||
348 | unsigned int hdtv_std_smpte_274m_1080p_50:1; | ||
349 | |||
350 | unsigned int hdtv_std_smpte_274m_1080p_59:1; | ||
351 | unsigned int hdtv_std_smpte_274m_1080p_60:1; | ||
352 | unsigned int hdtv_std_smpte_295m_1080i_50:1; | ||
353 | unsigned int hdtv_std_smpte_295m_1080p_50:1; | ||
354 | unsigned int hdtv_std_smpte_296m_720p_59:1; | ||
355 | unsigned int hdtv_std_smpte_296m_720p_60:1; | ||
356 | unsigned int hdtv_std_smpte_296m_720p_50:1; | ||
357 | unsigned int hdtv_std_smpte_293m_480p_59:1; | ||
358 | |||
359 | unsigned int hdtv_std_smpte_170m_480i_59:1; | ||
360 | unsigned int hdtv_std_iturbt601_576i_50:1; | ||
361 | unsigned int hdtv_std_iturbt601_576p_50:1; | ||
362 | unsigned int hdtv_std_eia_7702a_480i_60:1; | ||
363 | unsigned int hdtv_std_eia_7702a_480p_60:1; | ||
364 | unsigned int pad:3; | ||
365 | } __attribute__((packed)); | ||
308 | 366 | ||
309 | #define SDVO_CMD_GET_TV_FORMAT 0x28 | 367 | #define SDVO_CMD_GET_TV_FORMAT 0x28 |
310 | 368 | ||
311 | #define SDVO_CMD_SET_TV_FORMAT 0x29 | 369 | #define SDVO_CMD_SET_TV_FORMAT 0x29 |
312 | 370 | ||
371 | /** Returns the resolutions that can be used with the given TV format */ | ||
372 | #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 | ||
373 | struct intel_sdvo_sdtv_resolution_request { | ||
374 | unsigned int ntsc_m:1; | ||
375 | unsigned int ntsc_j:1; | ||
376 | unsigned int ntsc_443:1; | ||
377 | unsigned int pal_b:1; | ||
378 | unsigned int pal_d:1; | ||
379 | unsigned int pal_g:1; | ||
380 | unsigned int pal_h:1; | ||
381 | unsigned int pal_i:1; | ||
382 | |||
383 | unsigned int pal_m:1; | ||
384 | unsigned int pal_n:1; | ||
385 | unsigned int pal_nc:1; | ||
386 | unsigned int pal_60:1; | ||
387 | unsigned int secam_b:1; | ||
388 | unsigned int secam_d:1; | ||
389 | unsigned int secam_g:1; | ||
390 | unsigned int secam_k:1; | ||
391 | |||
392 | unsigned int secam_k1:1; | ||
393 | unsigned int secam_l:1; | ||
394 | unsigned int secam_60:1; | ||
395 | unsigned int pad:5; | ||
396 | } __attribute__((packed)); | ||
397 | |||
398 | struct intel_sdvo_sdtv_resolution_reply { | ||
399 | unsigned int res_320x200:1; | ||
400 | unsigned int res_320x240:1; | ||
401 | unsigned int res_400x300:1; | ||
402 | unsigned int res_640x350:1; | ||
403 | unsigned int res_640x400:1; | ||
404 | unsigned int res_640x480:1; | ||
405 | unsigned int res_704x480:1; | ||
406 | unsigned int res_704x576:1; | ||
407 | |||
408 | unsigned int res_720x350:1; | ||
409 | unsigned int res_720x400:1; | ||
410 | unsigned int res_720x480:1; | ||
411 | unsigned int res_720x540:1; | ||
412 | unsigned int res_720x576:1; | ||
413 | unsigned int res_768x576:1; | ||
414 | unsigned int res_800x600:1; | ||
415 | unsigned int res_832x624:1; | ||
416 | |||
417 | unsigned int res_920x766:1; | ||
418 | unsigned int res_1024x768:1; | ||
419 | unsigned int res_1280x1024:1; | ||
420 | unsigned int pad:5; | ||
421 | } __attribute__((packed)); | ||
422 | |||
423 | /* Get supported resolutions with a square pixel aspect ratio that can be | ||
424 | scaled for the requested HDTV format */ | ||
425 | #define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 | ||
426 | |||
427 | struct intel_sdvo_hdtv_resolution_request { | ||
428 | unsigned int hdtv_std_smpte_240m_1080i_59:1; | ||
429 | unsigned int hdtv_std_smpte_240m_1080i_60:1; | ||
430 | unsigned int hdtv_std_smpte_260m_1080i_59:1; | ||
431 | unsigned int hdtv_std_smpte_260m_1080i_60:1; | ||
432 | unsigned int hdtv_std_smpte_274m_1080i_50:1; | ||
433 | unsigned int hdtv_std_smpte_274m_1080i_59:1; | ||
434 | unsigned int hdtv_std_smpte_274m_1080i_60:1; | ||
435 | unsigned int hdtv_std_smpte_274m_1080p_23:1; | ||
436 | |||
437 | unsigned int hdtv_std_smpte_274m_1080p_24:1; | ||
438 | unsigned int hdtv_std_smpte_274m_1080p_25:1; | ||
439 | unsigned int hdtv_std_smpte_274m_1080p_29:1; | ||
440 | unsigned int hdtv_std_smpte_274m_1080p_30:1; | ||
441 | unsigned int hdtv_std_smpte_274m_1080p_50:1; | ||
442 | unsigned int hdtv_std_smpte_274m_1080p_59:1; | ||
443 | unsigned int hdtv_std_smpte_274m_1080p_60:1; | ||
444 | unsigned int hdtv_std_smpte_295m_1080i_50:1; | ||
445 | |||
446 | unsigned int hdtv_std_smpte_295m_1080p_50:1; | ||
447 | unsigned int hdtv_std_smpte_296m_720p_59:1; | ||
448 | unsigned int hdtv_std_smpte_296m_720p_60:1; | ||
449 | unsigned int hdtv_std_smpte_296m_720p_50:1; | ||
450 | unsigned int hdtv_std_smpte_293m_480p_59:1; | ||
451 | unsigned int hdtv_std_smpte_170m_480i_59:1; | ||
452 | unsigned int hdtv_std_iturbt601_576i_50:1; | ||
453 | unsigned int hdtv_std_iturbt601_576p_50:1; | ||
454 | |||
455 | unsigned int hdtv_std_eia_7702a_480i_60:1; | ||
456 | unsigned int hdtv_std_eia_7702a_480p_60:1; | ||
457 | unsigned int pad:6; | ||
458 | } __attribute__((packed)); | ||
459 | |||
460 | struct intel_sdvo_hdtv_resolution_reply { | ||
461 | unsigned int res_640x480:1; | ||
462 | unsigned int res_800x600:1; | ||
463 | unsigned int res_1024x768:1; | ||
464 | unsigned int res_1280x960:1; | ||
465 | unsigned int res_1400x1050:1; | ||
466 | unsigned int res_1600x1200:1; | ||
467 | unsigned int res_1920x1440:1; | ||
468 | unsigned int res_2048x1536:1; | ||
469 | |||
470 | unsigned int res_2560x1920:1; | ||
471 | unsigned int res_3200x2400:1; | ||
472 | unsigned int res_3840x2880:1; | ||
473 | unsigned int pad1:5; | ||
474 | |||
475 | unsigned int res_848x480:1; | ||
476 | unsigned int res_1064x600:1; | ||
477 | unsigned int res_1280x720:1; | ||
478 | unsigned int res_1360x768:1; | ||
479 | unsigned int res_1704x960:1; | ||
480 | unsigned int res_1864x1050:1; | ||
481 | unsigned int res_1920x1080:1; | ||
482 | unsigned int res_2128x1200:1; | ||
483 | |||
484 | unsigned int res_2560x1400:1; | ||
485 | unsigned int res_2728x1536:1; | ||
486 | unsigned int res_3408x1920:1; | ||
487 | unsigned int res_4264x2400:1; | ||
488 | unsigned int res_5120x2880:1; | ||
489 | unsigned int pad2:3; | ||
490 | |||
491 | unsigned int res_768x480:1; | ||
492 | unsigned int res_960x600:1; | ||
493 | unsigned int res_1152x720:1; | ||
494 | unsigned int res_1124x768:1; | ||
495 | unsigned int res_1536x960:1; | ||
496 | unsigned int res_1680x1050:1; | ||
497 | unsigned int res_1728x1080:1; | ||
498 | unsigned int res_1920x1200:1; | ||
499 | |||
500 | unsigned int res_2304x1440:1; | ||
501 | unsigned int res_2456x1536:1; | ||
502 | unsigned int res_3072x1920:1; | ||
503 | unsigned int res_3840x2400:1; | ||
504 | unsigned int res_4608x2880:1; | ||
505 | unsigned int pad3:3; | ||
506 | |||
507 | unsigned int res_1280x1024:1; | ||
508 | unsigned int pad4:7; | ||
509 | |||
510 | unsigned int res_1280x768:1; | ||
511 | unsigned int pad5:7; | ||
512 | } __attribute__((packed)); | ||
513 | |||
514 | /* Get supported power states; returns info for both the encoder and the monitor, | ||
515 | and relies on the last SetTargetInput and SetTargetOutput calls */ | ||
313 | #define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a | 516 | #define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a |
517 | /* Get power state; returns info for both the encoder and the monitor, and relies | ||
518 | on the last SetTargetInput and SetTargetOutput calls */ | ||
519 | #define SDVO_CMD_GET_POWER_STATE 0x2b | ||
314 | #define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b | 520 | #define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b |
315 | #define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c | 521 | #define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c |
316 | # define SDVO_ENCODER_STATE_ON (1 << 0) | 522 | # define SDVO_ENCODER_STATE_ON (1 << 0) |
317 | # define SDVO_ENCODER_STATE_STANDBY (1 << 1) | 523 | # define SDVO_ENCODER_STATE_STANDBY (1 << 1) |
318 | # define SDVO_ENCODER_STATE_SUSPEND (1 << 2) | 524 | # define SDVO_ENCODER_STATE_SUSPEND (1 << 2) |
319 | # define SDVO_ENCODER_STATE_OFF (1 << 3) | 525 | # define SDVO_ENCODER_STATE_OFF (1 << 3) |
526 | # define SDVO_MONITOR_STATE_ON (1 << 4) | ||
527 | # define SDVO_MONITOR_STATE_STANDBY (1 << 5) | ||
528 | # define SDVO_MONITOR_STATE_SUSPEND (1 << 6) | ||
529 | # define SDVO_MONITOR_STATE_OFF (1 << 7) | ||
530 | |||
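
The power-state replies above are only described by the two comments, so here is a minimal decoding sketch. It assumes the reply is a single status byte whose low nibble carries the SDVO_ENCODER_STATE_* bits and whose high nibble carries the SDVO_MONITOR_STATE_* bits, exactly as the defines suggest; the helper name sdvo_decode_power_state is an illustrative assumption, not part of this header.

/* Hypothetical helper: split a GET_POWER_STATE reply byte into its
 * encoder and monitor halves, using the bit definitions above. */
static void sdvo_decode_power_state(u8 reply, u8 *encoder, u8 *monitor)
{
	*encoder = reply & (SDVO_ENCODER_STATE_ON |
			    SDVO_ENCODER_STATE_STANDBY |
			    SDVO_ENCODER_STATE_SUSPEND |
			    SDVO_ENCODER_STATE_OFF);
	*monitor = reply & (SDVO_MONITOR_STATE_ON |
			    SDVO_MONITOR_STATE_STANDBY |
			    SDVO_MONITOR_STATE_SUSPEND |
			    SDVO_MONITOR_STATE_OFF);
}
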
531 | #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d | ||
532 | #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e | ||
533 | #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f | ||
534 | /** | ||
535 | * The panel power sequencing parameters are in units of milliseconds. | ||
536 | * The high fields are bits 8:9 of the 10-bit values. | ||
537 | */ | ||
538 | struct sdvo_panel_power_sequencing { | ||
539 | u8 t0; | ||
540 | u8 t1; | ||
541 | u8 t2; | ||
542 | u8 t3; | ||
543 | u8 t4; | ||
544 | |||
545 | unsigned int t0_high:2; | ||
546 | unsigned int t1_high:2; | ||
547 | unsigned int t2_high:2; | ||
548 | unsigned int t3_high:2; | ||
549 | |||
550 | unsigned int t4_high:2; | ||
551 | unsigned int pad:6; | ||
552 | } __attribute__((packed)); | ||
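
Since the comment above only says that the *_high fields hold the upper bits, a short reconstruction sketch may help. It assumes each u8 field holds bits 7:0 of the 10-bit millisecond value and the matching two-bit *_high field holds bits 9:8; the helper name is made up for illustration.

/* Hypothetical helper: rebuild the full 10-bit t0 delay (in ms) from the
 * packed fields, assuming t0 = bits 7:0 and t0_high = bits 9:8. */
static u16 sdvo_panel_power_t0_ms(const struct sdvo_panel_power_sequencing *seq)
{
	return ((u16)seq->t0_high << 8) | seq->t0;
}

The other delays (t1 through t4) follow the same pattern with their respective high fields.
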
553 | |||
554 | #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 | ||
555 | struct sdvo_max_backlight_reply { | ||
556 | u8 max_value; | ||
557 | u8 default_value; | ||
558 | } __attribute__((packed)); | ||
559 | |||
560 | #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 | ||
561 | #define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 | ||
562 | |||
563 | #define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 | ||
564 | struct sdvo_get_ambient_light_reply { | ||
565 | u16 trip_low; | ||
566 | u16 trip_high; | ||
567 | u16 value; | ||
568 | } __attribute__((packed)); | ||
569 | #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 | ||
570 | struct sdvo_set_ambient_light_reply { | ||
571 | u16 trip_low; | ||
572 | u16 trip_high; | ||
573 | unsigned int enable:1; | ||
574 | unsigned int pad:7; | ||
575 | } __attribute__((packed)); | ||
576 | |||
577 | /* Set display power state */ | ||
578 | #define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d | ||
579 | # define SDVO_DISPLAY_STATE_ON (1 << 0) | ||
580 | # define SDVO_DISPLAY_STATE_STANDBY (1 << 1) | ||
581 | # define SDVO_DISPLAY_STATE_SUSPEND (1 << 2) | ||
582 | # define SDVO_DISPLAY_STATE_OFF (1 << 3) | ||
583 | |||
584 | #define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 | ||
585 | struct intel_sdvo_enhancements_reply { | ||
586 | unsigned int flicker_filter:1; | ||
587 | unsigned int flicker_filter_adaptive:1; | ||
588 | unsigned int flicker_filter_2d:1; | ||
589 | unsigned int saturation:1; | ||
590 | unsigned int hue:1; | ||
591 | unsigned int brightness:1; | ||
592 | unsigned int contrast:1; | ||
593 | unsigned int overscan_h:1; | ||
594 | |||
595 | unsigned int overscan_v:1; | ||
596 | unsigned int position_h:1; | ||
597 | unsigned int position_v:1; | ||
598 | unsigned int sharpness:1; | ||
599 | unsigned int dot_crawl:1; | ||
600 | unsigned int dither:1; | ||
601 | unsigned int max_tv_chroma_filter:1; | ||
602 | unsigned int max_tv_luma_filter:1; | ||
603 | } __attribute__((packed)); | ||
604 | |||
605 | /* Picture enhancement limits below are dependent on the current TV format, | ||
606 | * and thus need to be queried and set after the format is selected (a usage | ||
607 | * sketch follows the enhancement argument struct further down). */ | ||
608 | #define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d | ||
609 | #define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b | ||
610 | #define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52 | ||
611 | #define SDVO_CMD_GET_MAX_SATURATION 0x55 | ||
612 | #define SDVO_CMD_GET_MAX_HUE 0x58 | ||
613 | #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b | ||
614 | #define SDVO_CMD_GET_MAX_CONTRAST 0x5e | ||
615 | #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 | ||
616 | #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 | ||
617 | #define SDVO_CMD_GET_MAX_POSITION_H 0x67 | ||
618 | #define SDVO_CMD_GET_MAX_POSITION_V 0x6a | ||
619 | #define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d | ||
620 | #define SDVO_CMD_GET_MAX_TV_CHROMA 0x74 | ||
621 | #define SDVO_CMD_GET_MAX_TV_LUMA 0x77 | ||
622 | struct intel_sdvo_enhancement_limits_reply { | ||
623 | u16 max_value; | ||
624 | u16 default_value; | ||
625 | } __attribute__((packed)); | ||
320 | 626 | ||
321 | #define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93 | 627 | #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f |
628 | #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 | ||
629 | # define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0) | ||
630 | # define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0) | ||
631 | # define SDVO_LVDS_CONNECTOR_SPWG (0 << 2) | ||
632 | # define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2) | ||
633 | # define SDVO_LVDS_SINGLE_CHANNEL (0 << 4) | ||
634 | # define SDVO_LVDS_DUAL_CHANNEL (1 << 4) | ||
635 | |||
636 | #define SDVO_CMD_GET_FLICKER_FILTER 0x4e | ||
637 | #define SDVO_CMD_SET_FLICKER_FILTER 0x4f | ||
638 | #define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50 | ||
639 | #define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51 | ||
640 | #define SDVO_CMD_GET_2D_FLICKER_FITER 0x53 | ||
641 | #define SDVO_CMD_SET_2D_FLICKER_FITER 0x54 | ||
642 | #define SDVO_CMD_GET_SATURATION 0x56 | ||
643 | #define SDVO_CMD_SET_SATURATION 0x57 | ||
644 | #define SDVO_CMD_GET_HUE 0x59 | ||
645 | #define SDVO_CMD_SET_HUE 0x5a | ||
646 | #define SDVO_CMD_GET_BRIGHTNESS 0x5c | ||
647 | #define SDVO_CMD_SET_BRIGHTNESS 0x5d | ||
648 | #define SDVO_CMD_GET_CONTRAST 0x5f | ||
649 | #define SDVO_CMD_SET_CONTRAST 0x60 | ||
650 | #define SDVO_CMD_GET_OVERSCAN_H 0x62 | ||
651 | #define SDVO_CMD_SET_OVERSCAN_H 0x63 | ||
652 | #define SDVO_CMD_GET_OVERSCAN_V 0x65 | ||
653 | #define SDVO_CMD_SET_OVERSCAN_V 0x66 | ||
654 | #define SDVO_CMD_GET_POSITION_H 0x68 | ||
655 | #define SDVO_CMD_SET_POSITION_H 0x69 | ||
656 | #define SDVO_CMD_GET_POSITION_V 0x6b | ||
657 | #define SDVO_CMD_SET_POSITION_V 0x6c | ||
658 | #define SDVO_CMD_GET_SHARPNESS 0x6e | ||
659 | #define SDVO_CMD_SET_SHARPNESS 0x6f | ||
660 | #define SDVO_CMD_GET_TV_CHROMA 0x75 | ||
661 | #define SDVO_CMD_SET_TV_CHROMA 0x76 | ||
662 | #define SDVO_CMD_GET_TV_LUMA 0x78 | ||
663 | #define SDVO_CMD_SET_TV_LUMA 0x79 | ||
664 | struct intel_sdvo_enhancements_arg { | ||
665 | u16 value; | ||
666 | } __attribute__((packed)); | ||
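
As noted in the comment above the GET_MAX_* opcodes, the picture-enhancement limits depend on the current TV format, so the usual flow is: select the format, query the limit, clamp the requested value, then issue the SET. The sketch below assumes that flow; sdvo_cmd() is a placeholder for whatever SDVO command transport the driver uses and is not a function defined in this header.

/* Sketch only: clamp a requested brightness to the format-dependent
 * maximum before issuing the SET command.  sdvo_cmd() is a stand-in
 * for the driver's real SDVO command transport. */
static int sdvo_set_brightness_clamped(void *sdvo, u16 requested)
{
	struct intel_sdvo_enhancement_limits_reply limits;
	struct intel_sdvo_enhancements_arg arg;

	if (!sdvo_cmd(sdvo, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0,
		      &limits, sizeof(limits)))
		return -1;

	arg.value = requested < limits.max_value ? requested : limits.max_value;

	return sdvo_cmd(sdvo, SDVO_CMD_SET_BRIGHTNESS, &arg, sizeof(arg),
			NULL, 0) ? 0 : -1;
}
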
667 | |||
668 | #define SDVO_CMD_GET_DOT_CRAWL 0x70 | ||
669 | #define SDVO_CMD_SET_DOT_CRAWL 0x71 | ||
670 | # define SDVO_DOT_CRAWL_ON (1 << 0) | ||
671 | # define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1) | ||
672 | |||
673 | #define SDVO_CMD_GET_DITHER 0x72 | ||
674 | #define SDVO_CMD_SET_DITHER 0x73 | ||
675 | # define SDVO_DITHER_ON (1 << 0) | ||
676 | # define SDVO_DITHER_DEFAULT_ON (1 << 1) | ||
322 | 677 | ||
323 | #define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a | 678 | #define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a |
324 | # define SDVO_CONTROL_BUS_PROM 0x0 | 679 | # define SDVO_CONTROL_BUS_PROM (1 << 0) |
325 | # define SDVO_CONTROL_BUS_DDC1 0x1 | 680 | # define SDVO_CONTROL_BUS_DDC1 (1 << 1) |
326 | # define SDVO_CONTROL_BUS_DDC2 0x2 | 681 | # define SDVO_CONTROL_BUS_DDC2 (1 << 2) |
327 | # define SDVO_CONTROL_BUS_DDC3 0x3 | 682 | # define SDVO_CONTROL_BUS_DDC3 (1 << 3) |
683 | |||
684 | /* HDMI op codes */ | ||
685 | #define SDVO_CMD_GET_SUPP_ENCODE 0x9d | ||
686 | #define SDVO_CMD_GET_ENCODE 0x9e | ||
687 | #define SDVO_CMD_SET_ENCODE 0x9f | ||
688 | #define SDVO_ENCODE_DVI 0x0 | ||
689 | #define SDVO_ENCODE_HDMI 0x1 | ||
690 | #define SDVO_CMD_SET_PIXEL_REPLI 0x8b | ||
691 | #define SDVO_CMD_GET_PIXEL_REPLI 0x8c | ||
692 | #define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d | ||
693 | #define SDVO_CMD_SET_COLORIMETRY 0x8e | ||
694 | #define SDVO_COLORIMETRY_RGB256 0x0 | ||
695 | #define SDVO_COLORIMETRY_RGB220 0x1 | ||
696 | #define SDVO_COLORIMETRY_YCrCb422 0x3 | ||
697 | #define SDVO_COLORIMETRY_YCrCb444 0x4 | ||
698 | #define SDVO_CMD_GET_COLORIMETRY 0x8f | ||
699 | #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 | ||
700 | #define SDVO_CMD_SET_AUDIO_STAT 0x91 | ||
701 | #define SDVO_CMD_GET_AUDIO_STAT 0x92 | ||
702 | #define SDVO_CMD_SET_HBUF_INDEX 0x93 | ||
703 | #define SDVO_CMD_GET_HBUF_INDEX 0x94 | ||
704 | #define SDVO_CMD_GET_HBUF_INFO 0x95 | ||
705 | #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 | ||
706 | #define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97 | ||
707 | #define SDVO_CMD_SET_HBUF_DATA 0x98 | ||
708 | #define SDVO_CMD_GET_HBUF_DATA 0x99 | ||
709 | #define SDVO_CMD_SET_HBUF_TXRATE 0x9a | ||
710 | #define SDVO_CMD_GET_HBUF_TXRATE 0x9b | ||
711 | #define SDVO_HBUF_TX_DISABLED (0 << 6) | ||
712 | #define SDVO_HBUF_TX_ONCE (2 << 6) | ||
713 | #define SDVO_HBUF_TX_VSYNC (3 << 6) | ||
714 | #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c | ||
715 | |||
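
The HBUF opcodes above imply a small protocol for writing HDMI infoframe-style data: select a buffer by index, write the payload, then program a transmit rate. The sketch below assumes that ordering and an 8-byte write granularity; both the chunk size and the sdvo_cmd() transport helper are assumptions for illustration, not facts stated in this header.

/* Sketch only: write a payload into the HBUF selected by 'index' and
 * arm it to be resent every vsync.  The 8-byte chunking and the
 * sdvo_cmd() helper are illustrative assumptions. */
static int sdvo_write_hbuf(void *sdvo, u8 index, const u8 *data, u8 len)
{
	u8 chunk[8];
	u8 rate = SDVO_HBUF_TX_VSYNC;
	unsigned int i;

	if (!sdvo_cmd(sdvo, SDVO_CMD_SET_HBUF_INDEX, &index, 1, NULL, 0))
		return -1;

	for (i = 0; i < len; i += 8) {
		unsigned int n = (len - i < 8) ? len - i : 8;

		memset(chunk, 0, sizeof(chunk));
		memcpy(chunk, data + i, n);
		if (!sdvo_cmd(sdvo, SDVO_CMD_SET_HBUF_DATA, chunk,
			      sizeof(chunk), NULL, 0))
			return -1;
	}

	return sdvo_cmd(sdvo, SDVO_CMD_SET_HBUF_TXRATE, &rate, 1, NULL, 0) ? 0 : -1;
}
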
716 | struct intel_sdvo_encode { | ||
717 | u8 dvi_rev; | ||
718 | u8 hdmi_rev; | ||
719 | } __attribute__ ((packed)); | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index fbb35dc56f5c..56485d67369b 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -411,7 +411,7 @@ struct tv_mode { | |||
411 | * These values account for -1s required. | 411 | * These values account for -1s required. |
412 | */ | 412 | */ |
413 | 413 | ||
414 | const static struct tv_mode tv_modes[] = { | 414 | static const struct tv_mode tv_modes[] = { |
415 | { | 415 | { |
416 | .name = "NTSC-M", | 416 | .name = "NTSC-M", |
417 | .clock = 107520, | 417 | .clock = 107520, |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 63212d7bbc28..92965dbb3c14 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -557,8 +557,10 @@ static int radeon_do_engine_reset(struct drm_device * dev) | |||
557 | } | 557 | } |
558 | 558 | ||
559 | static void radeon_cp_init_ring_buffer(struct drm_device * dev, | 559 | static void radeon_cp_init_ring_buffer(struct drm_device * dev, |
560 | drm_radeon_private_t * dev_priv) | 560 | drm_radeon_private_t *dev_priv, |
561 | struct drm_file *file_priv) | ||
561 | { | 562 | { |
563 | struct drm_radeon_master_private *master_priv; | ||
562 | u32 ring_start, cur_read_ptr; | 564 | u32 ring_start, cur_read_ptr; |
563 | u32 tmp; | 565 | u32 tmp; |
564 | 566 | ||
@@ -677,6 +679,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, | |||
677 | dev_priv->scratch[2] = 0; | 679 | dev_priv->scratch[2] = 0; |
678 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); | 680 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); |
679 | 681 | ||
682 | /* reset sarea copies of these */ | ||
683 | master_priv = file_priv->master->driver_priv; | ||
684 | if (master_priv->sarea_priv) { | ||
685 | master_priv->sarea_priv->last_frame = 0; | ||
686 | master_priv->sarea_priv->last_dispatch = 0; | ||
687 | master_priv->sarea_priv->last_clear = 0; | ||
688 | } | ||
689 | |||
680 | radeon_do_wait_for_idle(dev_priv); | 690 | radeon_do_wait_for_idle(dev_priv); |
681 | 691 | ||
682 | /* Sync everything up */ | 692 | /* Sync everything up */ |
@@ -1039,9 +1049,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, | |||
1039 | 1049 | ||
1040 | #if __OS_HAS_AGP | 1050 | #if __OS_HAS_AGP |
1041 | if (dev_priv->flags & RADEON_IS_AGP) { | 1051 | if (dev_priv->flags & RADEON_IS_AGP) { |
1042 | drm_core_ioremap(dev_priv->cp_ring, dev); | 1052 | drm_core_ioremap_wc(dev_priv->cp_ring, dev); |
1043 | drm_core_ioremap(dev_priv->ring_rptr, dev); | 1053 | drm_core_ioremap_wc(dev_priv->ring_rptr, dev); |
1044 | drm_core_ioremap(dev->agp_buffer_map, dev); | 1054 | drm_core_ioremap_wc(dev->agp_buffer_map, dev); |
1045 | if (!dev_priv->cp_ring->handle || | 1055 | if (!dev_priv->cp_ring->handle || |
1046 | !dev_priv->ring_rptr->handle || | 1056 | !dev_priv->ring_rptr->handle || |
1047 | !dev->agp_buffer_map->handle) { | 1057 | !dev->agp_buffer_map->handle) { |
@@ -1215,7 +1225,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, | |||
1215 | } | 1225 | } |
1216 | 1226 | ||
1217 | radeon_cp_load_microcode(dev_priv); | 1227 | radeon_cp_load_microcode(dev_priv); |
1218 | radeon_cp_init_ring_buffer(dev, dev_priv); | 1228 | radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); |
1219 | 1229 | ||
1220 | dev_priv->last_buf = 0; | 1230 | dev_priv->last_buf = 0; |
1221 | 1231 | ||
@@ -1281,7 +1291,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) | |||
1281 | * | 1291 | * |
1282 | * Charl P. Botha <http://cpbotha.net> | 1292 | * Charl P. Botha <http://cpbotha.net> |
1283 | */ | 1293 | */ |
1284 | static int radeon_do_resume_cp(struct drm_device * dev) | 1294 | static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv) |
1285 | { | 1295 | { |
1286 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1296 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1287 | 1297 | ||
@@ -1304,7 +1314,7 @@ static int radeon_do_resume_cp(struct drm_device * dev) | |||
1304 | } | 1314 | } |
1305 | 1315 | ||
1306 | radeon_cp_load_microcode(dev_priv); | 1316 | radeon_cp_load_microcode(dev_priv); |
1307 | radeon_cp_init_ring_buffer(dev, dev_priv); | 1317 | radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); |
1308 | 1318 | ||
1309 | radeon_do_engine_reset(dev); | 1319 | radeon_do_engine_reset(dev); |
1310 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); | 1320 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); |
@@ -1479,8 +1489,7 @@ int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_pri | |||
1479 | */ | 1489 | */ |
1480 | int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) | 1490 | int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) |
1481 | { | 1491 | { |
1482 | 1492 | return radeon_do_resume_cp(dev, file_priv); | |
1483 | return radeon_do_resume_cp(dev); | ||
1484 | } | 1493 | } |
1485 | 1494 | ||
1486 | int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) | 1495 | int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) |