diff options
author | Dave Airlie <airlied@redhat.com> | 2014-04-02 17:51:54 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2014-04-02 17:51:54 -0400 |
commit | 66e514c14a1cb9c2540c685c40d94dc6ef6b6bb5 (patch) | |
tree | b21a13e80e7a27e8829fa1f1693c46bf2cab4507 | |
parent | 2844ea3f252331cc0ecf3ae74f6226db2f580f8a (diff) | |
parent | 698b3135acb94e838a33a69f1a7a684fe0d90734 (diff) |
Merge tag 'drm-intel-next-2014-03-21' of git://anongit.freedesktop.org/drm-intel into drm-next
- Inherit/reuse firmware framebuffers (for real this time) from Jesse, less
flicker for fastbooting.
- More flexible cloning for hdmi (Ville).
- Some PPGTT fixes from Ben.
- Ring init fixes from Naresh Kumar.
- set_cache_level regression fixes for the vma conversion from Ville&Chris.
- Conversion to the new dp aux helpers (Jani).
- Unification of runtime pm with pc8 support from Paulo, prep work for runtime
pm on other platforms than HSW.
- Larger cursor sizes (Sagar Kamble).
- Piles of improvements and fixes all over, as usual.
* tag 'drm-intel-next-2014-03-21' of git://anongit.freedesktop.org/drm-intel: (75 commits)
drm/i915: Include a note about the dangers of I915_READ64/I915_WRITE64
drm/i915/sdvo: fix questionable return value check
drm/i915: Fix unsafe loop iteration over vma whilst unbinding them
drm/i915: Enabling 128x128 and 256x256 ARGB Cursor Support
drm/i915: Print how many objects are shared in per-process stats
drm/i915: Per-process stats work better when evaluated per-process
drm/i915: remove rps local variables
drm/i915: Remove extraneous MMIO for RPS
drm/i915: Rename and comment all the RPS *stuff*
drm/i915: Store the HW min frequency as min_freq
drm/i915: Fix coding style for RPS
drm/i915: Reorganize the overclock code
drm/i915: init pm.suspended earlier
drm/i915: update the PC8 and runtime PM documentation
drm/i915: rename __hsw_do_{en, dis}able_pc8
drm/i915: kill struct i915_package_c8
drm/i915: move pc8.irqs_disabled to pm.irqs_disabled
drm/i915: remove dev_priv->pc8.enabled
drm/i915: don't get/put PC8 when getting/putting power wells
drm/i915: make intel_aux_display_runtime_get get runtime PM, not PC8
...
Conflicts:
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
32 files changed, 1308 insertions, 930 deletions
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 74724aacb8ae..f4babed2f557 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -726,7 +726,8 @@ int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux) | |||
726 | aux->ddc.dev.parent = aux->dev; | 726 | aux->ddc.dev.parent = aux->dev; |
727 | aux->ddc.dev.of_node = aux->dev->of_node; | 727 | aux->ddc.dev.of_node = aux->dev->of_node; |
728 | 728 | ||
729 | strncpy(aux->ddc.name, dev_name(aux->dev), sizeof(aux->ddc.name)); | 729 | strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), |
730 | sizeof(aux->ddc.name)); | ||
730 | 731 | ||
731 | return i2c_add_adapter(&aux->ddc); | 732 | return i2c_add_adapter(&aux->ddc); |
732 | } | 733 | } |
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 69c1287f1262..4cf6d020d513 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c | |||
@@ -402,7 +402,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring, | |||
402 | length = ((*cmd & desc->length.mask) + LENGTH_BIAS); | 402 | length = ((*cmd & desc->length.mask) + LENGTH_BIAS); |
403 | 403 | ||
404 | if ((batch_end - cmd) < length) { | 404 | if ((batch_end - cmd) < length) { |
405 | DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%ld\n", | 405 | DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n", |
406 | *cmd, | 406 | *cmd, |
407 | length, | 407 | length, |
408 | (unsigned long)(batch_end - cmd)); | 408 | (unsigned long)(batch_end - cmd)); |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index cacd4974cf23..d04786db9627 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -299,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) | |||
299 | } while (0) | 299 | } while (0) |
300 | 300 | ||
301 | struct file_stats { | 301 | struct file_stats { |
302 | struct drm_i915_file_private *file_priv; | ||
302 | int count; | 303 | int count; |
303 | size_t total, active, inactive, unbound; | 304 | size_t total, unbound; |
305 | size_t global, shared; | ||
306 | size_t active, inactive; | ||
304 | }; | 307 | }; |
305 | 308 | ||
306 | static int per_file_stats(int id, void *ptr, void *data) | 309 | static int per_file_stats(int id, void *ptr, void *data) |
307 | { | 310 | { |
308 | struct drm_i915_gem_object *obj = ptr; | 311 | struct drm_i915_gem_object *obj = ptr; |
309 | struct file_stats *stats = data; | 312 | struct file_stats *stats = data; |
313 | struct i915_vma *vma; | ||
310 | 314 | ||
311 | stats->count++; | 315 | stats->count++; |
312 | stats->total += obj->base.size; | 316 | stats->total += obj->base.size; |
313 | 317 | ||
314 | if (i915_gem_obj_ggtt_bound(obj)) { | 318 | if (obj->base.name || obj->base.dma_buf) |
315 | if (!list_empty(&obj->ring_list)) | 319 | stats->shared += obj->base.size; |
316 | stats->active += obj->base.size; | 320 | |
317 | else | 321 | if (USES_FULL_PPGTT(obj->base.dev)) { |
318 | stats->inactive += obj->base.size; | 322 | list_for_each_entry(vma, &obj->vma_list, vma_link) { |
323 | struct i915_hw_ppgtt *ppgtt; | ||
324 | |||
325 | if (!drm_mm_node_allocated(&vma->node)) | ||
326 | continue; | ||
327 | |||
328 | if (i915_is_ggtt(vma->vm)) { | ||
329 | stats->global += obj->base.size; | ||
330 | continue; | ||
331 | } | ||
332 | |||
333 | ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); | ||
334 | if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv) | ||
335 | continue; | ||
336 | |||
337 | if (obj->ring) /* XXX per-vma statistic */ | ||
338 | stats->active += obj->base.size; | ||
339 | else | ||
340 | stats->inactive += obj->base.size; | ||
341 | |||
342 | return 0; | ||
343 | } | ||
319 | } else { | 344 | } else { |
320 | if (!list_empty(&obj->global_list)) | 345 | if (i915_gem_obj_ggtt_bound(obj)) { |
321 | stats->unbound += obj->base.size; | 346 | stats->global += obj->base.size; |
347 | if (obj->ring) | ||
348 | stats->active += obj->base.size; | ||
349 | else | ||
350 | stats->inactive += obj->base.size; | ||
351 | return 0; | ||
352 | } | ||
322 | } | 353 | } |
323 | 354 | ||
355 | if (!list_empty(&obj->global_list)) | ||
356 | stats->unbound += obj->base.size; | ||
357 | |||
324 | return 0; | 358 | return 0; |
325 | } | 359 | } |
326 | 360 | ||
@@ -411,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
411 | struct task_struct *task; | 445 | struct task_struct *task; |
412 | 446 | ||
413 | memset(&stats, 0, sizeof(stats)); | 447 | memset(&stats, 0, sizeof(stats)); |
448 | stats.file_priv = file->driver_priv; | ||
414 | idr_for_each(&file->object_idr, per_file_stats, &stats); | 449 | idr_for_each(&file->object_idr, per_file_stats, &stats); |
415 | /* | 450 | /* |
416 | * Although we have a valid reference on file->pid, that does | 451 | * Although we have a valid reference on file->pid, that does |
@@ -420,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
420 | */ | 455 | */ |
421 | rcu_read_lock(); | 456 | rcu_read_lock(); |
422 | task = pid_task(file->pid, PIDTYPE_PID); | 457 | task = pid_task(file->pid, PIDTYPE_PID); |
423 | seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", | 458 | seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", |
424 | task ? task->comm : "<unknown>", | 459 | task ? task->comm : "<unknown>", |
425 | stats.count, | 460 | stats.count, |
426 | stats.total, | 461 | stats.total, |
427 | stats.active, | 462 | stats.active, |
428 | stats.inactive, | 463 | stats.inactive, |
464 | stats.global, | ||
465 | stats.shared, | ||
429 | stats.unbound); | 466 | stats.unbound); |
430 | rcu_read_unlock(); | 467 | rcu_read_unlock(); |
431 | } | 468 | } |
@@ -1026,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
1026 | max_freq * GT_FREQUENCY_MULTIPLIER); | 1063 | max_freq * GT_FREQUENCY_MULTIPLIER); |
1027 | 1064 | ||
1028 | seq_printf(m, "Max overclocked frequency: %dMHz\n", | 1065 | seq_printf(m, "Max overclocked frequency: %dMHz\n", |
1029 | dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); | 1066 | dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); |
1030 | } else if (IS_VALLEYVIEW(dev)) { | 1067 | } else if (IS_VALLEYVIEW(dev)) { |
1031 | u32 freq_sts, val; | 1068 | u32 freq_sts, val; |
1032 | 1069 | ||
@@ -1498,8 +1535,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1498 | 1535 | ||
1499 | seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); | 1536 | seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); |
1500 | 1537 | ||
1501 | for (gpu_freq = dev_priv->rps.min_delay; | 1538 | for (gpu_freq = dev_priv->rps.min_freq_softlimit; |
1502 | gpu_freq <= dev_priv->rps.max_delay; | 1539 | gpu_freq <= dev_priv->rps.max_freq_softlimit; |
1503 | gpu_freq++) { | 1540 | gpu_freq++) { |
1504 | ia_freq = gpu_freq; | 1541 | ia_freq = gpu_freq; |
1505 | sandybridge_pcode_read(dev_priv, | 1542 | sandybridge_pcode_read(dev_priv, |
@@ -2012,15 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused) | |||
2012 | return 0; | 2049 | return 0; |
2013 | } | 2050 | } |
2014 | 2051 | ||
2015 | mutex_lock(&dev_priv->pc8.lock); | ||
2016 | seq_printf(m, "Requirements met: %s\n", | ||
2017 | yesno(dev_priv->pc8.requirements_met)); | ||
2018 | seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); | 2052 | seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); |
2019 | seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count); | ||
2020 | seq_printf(m, "IRQs disabled: %s\n", | 2053 | seq_printf(m, "IRQs disabled: %s\n", |
2021 | yesno(dev_priv->pc8.irqs_disabled)); | 2054 | yesno(dev_priv->pm.irqs_disabled)); |
2022 | seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled)); | ||
2023 | mutex_unlock(&dev_priv->pc8.lock); | ||
2024 | 2055 | ||
2025 | return 0; | 2056 | return 0; |
2026 | } | 2057 | } |
@@ -2248,24 +2279,67 @@ static void intel_connector_info(struct seq_file *m, | |||
2248 | intel_seq_print_mode(m, 2, mode); | 2279 | intel_seq_print_mode(m, 2, mode); |
2249 | } | 2280 | } |
2250 | 2281 | ||
2282 | static bool cursor_active(struct drm_device *dev, int pipe) | ||
2283 | { | ||
2284 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2285 | u32 state; | ||
2286 | |||
2287 | if (IS_845G(dev) || IS_I865G(dev)) | ||
2288 | state = I915_READ(_CURACNTR) & CURSOR_ENABLE; | ||
2289 | else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) | ||
2290 | state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; | ||
2291 | else | ||
2292 | state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; | ||
2293 | |||
2294 | return state; | ||
2295 | } | ||
2296 | |||
2297 | static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) | ||
2298 | { | ||
2299 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2300 | u32 pos; | ||
2301 | |||
2302 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) | ||
2303 | pos = I915_READ(CURPOS_IVB(pipe)); | ||
2304 | else | ||
2305 | pos = I915_READ(CURPOS(pipe)); | ||
2306 | |||
2307 | *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; | ||
2308 | if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) | ||
2309 | *x = -*x; | ||
2310 | |||
2311 | *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; | ||
2312 | if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) | ||
2313 | *y = -*y; | ||
2314 | |||
2315 | return cursor_active(dev, pipe); | ||
2316 | } | ||
2317 | |||
2251 | static int i915_display_info(struct seq_file *m, void *unused) | 2318 | static int i915_display_info(struct seq_file *m, void *unused) |
2252 | { | 2319 | { |
2253 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 2320 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
2254 | struct drm_device *dev = node->minor->dev; | 2321 | struct drm_device *dev = node->minor->dev; |
2255 | struct drm_crtc *crtc; | 2322 | struct intel_crtc *crtc; |
2256 | struct drm_connector *connector; | 2323 | struct drm_connector *connector; |
2257 | 2324 | ||
2258 | drm_modeset_lock_all(dev); | 2325 | drm_modeset_lock_all(dev); |
2259 | seq_printf(m, "CRTC info\n"); | 2326 | seq_printf(m, "CRTC info\n"); |
2260 | seq_printf(m, "---------\n"); | 2327 | seq_printf(m, "---------\n"); |
2261 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2328 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { |
2262 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2329 | bool active; |
2330 | int x, y; | ||
2263 | 2331 | ||
2264 | seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", | 2332 | seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", |
2265 | crtc->base.id, pipe_name(intel_crtc->pipe), | 2333 | crtc->base.base.id, pipe_name(crtc->pipe), |
2266 | intel_crtc->active ? "yes" : "no"); | 2334 | yesno(crtc->active)); |
2267 | if (intel_crtc->active) | 2335 | if (crtc->active) |
2268 | intel_crtc_info(m, intel_crtc); | 2336 | intel_crtc_info(m, crtc); |
2337 | |||
2338 | active = cursor_position(dev, crtc->pipe, &x, &y); | ||
2339 | seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", | ||
2340 | yesno(crtc->cursor_visible), | ||
2341 | x, y, crtc->cursor_addr, | ||
2342 | yesno(active)); | ||
2269 | } | 2343 | } |
2270 | 2344 | ||
2271 | seq_printf(m, "\n"); | 2345 | seq_printf(m, "\n"); |
@@ -2603,8 +2677,6 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, | |||
2603 | if (need_stable_symbols) { | 2677 | if (need_stable_symbols) { |
2604 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | 2678 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); |
2605 | 2679 | ||
2606 | WARN_ON(!IS_G4X(dev)); | ||
2607 | |||
2608 | tmp |= DC_BALANCE_RESET_VLV; | 2680 | tmp |= DC_BALANCE_RESET_VLV; |
2609 | if (pipe == PIPE_A) | 2681 | if (pipe == PIPE_A) |
2610 | tmp |= PIPE_A_SCRAMBLE_RESET; | 2682 | tmp |= PIPE_A_SCRAMBLE_RESET; |
@@ -3414,9 +3486,9 @@ i915_max_freq_get(void *data, u64 *val) | |||
3414 | return ret; | 3486 | return ret; |
3415 | 3487 | ||
3416 | if (IS_VALLEYVIEW(dev)) | 3488 | if (IS_VALLEYVIEW(dev)) |
3417 | *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); | 3489 | *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); |
3418 | else | 3490 | else |
3419 | *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | 3491 | *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; |
3420 | mutex_unlock(&dev_priv->rps.hw_lock); | 3492 | mutex_unlock(&dev_priv->rps.hw_lock); |
3421 | 3493 | ||
3422 | return 0; | 3494 | return 0; |
@@ -3453,16 +3525,16 @@ i915_max_freq_set(void *data, u64 val) | |||
3453 | do_div(val, GT_FREQUENCY_MULTIPLIER); | 3525 | do_div(val, GT_FREQUENCY_MULTIPLIER); |
3454 | 3526 | ||
3455 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 3527 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
3456 | hw_max = dev_priv->rps.hw_max; | 3528 | hw_max = dev_priv->rps.max_freq; |
3457 | hw_min = (rp_state_cap >> 16) & 0xff; | 3529 | hw_min = (rp_state_cap >> 16) & 0xff; |
3458 | } | 3530 | } |
3459 | 3531 | ||
3460 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { | 3532 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { |
3461 | mutex_unlock(&dev_priv->rps.hw_lock); | 3533 | mutex_unlock(&dev_priv->rps.hw_lock); |
3462 | return -EINVAL; | 3534 | return -EINVAL; |
3463 | } | 3535 | } |
3464 | 3536 | ||
3465 | dev_priv->rps.max_delay = val; | 3537 | dev_priv->rps.max_freq_softlimit = val; |
3466 | 3538 | ||
3467 | if (IS_VALLEYVIEW(dev)) | 3539 | if (IS_VALLEYVIEW(dev)) |
3468 | valleyview_set_rps(dev, val); | 3540 | valleyview_set_rps(dev, val); |
@@ -3495,9 +3567,9 @@ i915_min_freq_get(void *data, u64 *val) | |||
3495 | return ret; | 3567 | return ret; |
3496 | 3568 | ||
3497 | if (IS_VALLEYVIEW(dev)) | 3569 | if (IS_VALLEYVIEW(dev)) |
3498 | *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); | 3570 | *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); |
3499 | else | 3571 | else |
3500 | *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; | 3572 | *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; |
3501 | mutex_unlock(&dev_priv->rps.hw_lock); | 3573 | mutex_unlock(&dev_priv->rps.hw_lock); |
3502 | 3574 | ||
3503 | return 0; | 3575 | return 0; |
@@ -3534,16 +3606,16 @@ i915_min_freq_set(void *data, u64 val) | |||
3534 | do_div(val, GT_FREQUENCY_MULTIPLIER); | 3606 | do_div(val, GT_FREQUENCY_MULTIPLIER); |
3535 | 3607 | ||
3536 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 3608 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
3537 | hw_max = dev_priv->rps.hw_max; | 3609 | hw_max = dev_priv->rps.max_freq; |
3538 | hw_min = (rp_state_cap >> 16) & 0xff; | 3610 | hw_min = (rp_state_cap >> 16) & 0xff; |
3539 | } | 3611 | } |
3540 | 3612 | ||
3541 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { | 3613 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { |
3542 | mutex_unlock(&dev_priv->rps.hw_lock); | 3614 | mutex_unlock(&dev_priv->rps.hw_lock); |
3543 | return -EINVAL; | 3615 | return -EINVAL; |
3544 | } | 3616 | } |
3545 | 3617 | ||
3546 | dev_priv->rps.min_delay = val; | 3618 | dev_priv->rps.min_freq_softlimit = val; |
3547 | 3619 | ||
3548 | if (IS_VALLEYVIEW(dev)) | 3620 | if (IS_VALLEYVIEW(dev)) |
3549 | valleyview_set_rps(dev, val); | 3621 | valleyview_set_rps(dev, val); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e4d2b9f15ae2..4e0a26a83500 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1187,6 +1187,9 @@ intel_setup_mchbar(struct drm_device *dev) | |||
1187 | u32 temp; | 1187 | u32 temp; |
1188 | bool enabled; | 1188 | bool enabled; |
1189 | 1189 | ||
1190 | if (IS_VALLEYVIEW(dev)) | ||
1191 | return; | ||
1192 | |||
1190 | dev_priv->mchbar_need_disable = false; | 1193 | dev_priv->mchbar_need_disable = false; |
1191 | 1194 | ||
1192 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 1195 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
@@ -1608,8 +1611,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1608 | goto put_bridge; | 1611 | goto put_bridge; |
1609 | } | 1612 | } |
1610 | 1613 | ||
1611 | intel_uncore_early_sanitize(dev); | ||
1612 | |||
1613 | /* This must be called before any calls to HAS_PCH_* */ | 1614 | /* This must be called before any calls to HAS_PCH_* */ |
1614 | intel_detect_pch(dev); | 1615 | intel_detect_pch(dev); |
1615 | 1616 | ||
@@ -1822,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev) | |||
1822 | cancel_work_sync(&dev_priv->gpu_error.work); | 1823 | cancel_work_sync(&dev_priv->gpu_error.work); |
1823 | i915_destroy_error_state(dev); | 1824 | i915_destroy_error_state(dev); |
1824 | 1825 | ||
1825 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); | ||
1826 | |||
1827 | if (dev->pdev->msi_enabled) | 1826 | if (dev->pdev->msi_enabled) |
1828 | pci_disable_msi(dev->pdev); | 1827 | pci_disable_msi(dev->pdev); |
1829 | 1828 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 658fe24961eb..fa5d0ed76378 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -428,7 +428,6 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
428 | 428 | ||
429 | /* We do a lot of poking in a lot of registers, make sure they work | 429 | /* We do a lot of poking in a lot of registers, make sure they work |
430 | * properly. */ | 430 | * properly. */ |
431 | hsw_disable_package_c8(dev_priv); | ||
432 | intel_display_set_init_power(dev_priv, true); | 431 | intel_display_set_init_power(dev_priv, true); |
433 | 432 | ||
434 | drm_kms_helper_poll_disable(dev); | 433 | drm_kms_helper_poll_disable(dev); |
@@ -467,6 +466,7 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
467 | i915_save_state(dev); | 466 | i915_save_state(dev); |
468 | 467 | ||
469 | intel_opregion_fini(dev); | 468 | intel_opregion_fini(dev); |
469 | intel_uncore_fini(dev); | ||
470 | 470 | ||
471 | console_lock(); | 471 | console_lock(); |
472 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); | 472 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); |
@@ -603,10 +603,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) | |||
603 | schedule_work(&dev_priv->console_resume_work); | 603 | schedule_work(&dev_priv->console_resume_work); |
604 | } | 604 | } |
605 | 605 | ||
606 | /* Undo what we did at i915_drm_freeze so the refcount goes back to the | ||
607 | * expected level. */ | ||
608 | hsw_enable_package_c8(dev_priv); | ||
609 | |||
610 | mutex_lock(&dev_priv->modeset_restore_lock); | 606 | mutex_lock(&dev_priv->modeset_restore_lock); |
611 | dev_priv->modeset_restore = MODESET_DONE; | 607 | dev_priv->modeset_restore = MODESET_DONE; |
612 | mutex_unlock(&dev_priv->modeset_restore_lock); | 608 | mutex_unlock(&dev_priv->modeset_restore_lock); |
@@ -848,6 +844,9 @@ static int i915_runtime_suspend(struct device *device) | |||
848 | 844 | ||
849 | DRM_DEBUG_KMS("Suspending device\n"); | 845 | DRM_DEBUG_KMS("Suspending device\n"); |
850 | 846 | ||
847 | if (HAS_PC8(dev)) | ||
848 | hsw_enable_pc8(dev_priv); | ||
849 | |||
851 | i915_gem_release_all_mmaps(dev_priv); | 850 | i915_gem_release_all_mmaps(dev_priv); |
852 | 851 | ||
853 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); | 852 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
@@ -862,6 +861,7 @@ static int i915_runtime_suspend(struct device *device) | |||
862 | */ | 861 | */ |
863 | intel_opregion_notify_adapter(dev, PCI_D1); | 862 | intel_opregion_notify_adapter(dev, PCI_D1); |
864 | 863 | ||
864 | DRM_DEBUG_KMS("Device suspended\n"); | ||
865 | return 0; | 865 | return 0; |
866 | } | 866 | } |
867 | 867 | ||
@@ -878,6 +878,10 @@ static int i915_runtime_resume(struct device *device) | |||
878 | intel_opregion_notify_adapter(dev, PCI_D0); | 878 | intel_opregion_notify_adapter(dev, PCI_D0); |
879 | dev_priv->pm.suspended = false; | 879 | dev_priv->pm.suspended = false; |
880 | 880 | ||
881 | if (HAS_PC8(dev)) | ||
882 | hsw_disable_pc8(dev_priv); | ||
883 | |||
884 | DRM_DEBUG_KMS("Device resumed\n"); | ||
881 | return 0; | 885 | return 0; |
882 | } | 886 | } |
883 | 887 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2a319ba91a71..3f62be0fb5c5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -406,6 +406,7 @@ struct drm_i915_error_state { | |||
406 | 406 | ||
407 | struct intel_connector; | 407 | struct intel_connector; |
408 | struct intel_crtc_config; | 408 | struct intel_crtc_config; |
409 | struct intel_plane_config; | ||
409 | struct intel_crtc; | 410 | struct intel_crtc; |
410 | struct intel_limit; | 411 | struct intel_limit; |
411 | struct dpll; | 412 | struct dpll; |
@@ -444,6 +445,8 @@ struct drm_i915_display_funcs { | |||
444 | * fills out the pipe-config with the hw state. */ | 445 | * fills out the pipe-config with the hw state. */ |
445 | bool (*get_pipe_config)(struct intel_crtc *, | 446 | bool (*get_pipe_config)(struct intel_crtc *, |
446 | struct intel_crtc_config *); | 447 | struct intel_crtc_config *); |
448 | void (*get_plane_config)(struct intel_crtc *, | ||
449 | struct intel_plane_config *); | ||
447 | int (*crtc_mode_set)(struct drm_crtc *crtc, | 450 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
448 | int x, int y, | 451 | int x, int y, |
449 | struct drm_framebuffer *old_fb); | 452 | struct drm_framebuffer *old_fb); |
@@ -459,8 +462,9 @@ struct drm_i915_display_funcs { | |||
459 | struct drm_framebuffer *fb, | 462 | struct drm_framebuffer *fb, |
460 | struct drm_i915_gem_object *obj, | 463 | struct drm_i915_gem_object *obj, |
461 | uint32_t flags); | 464 | uint32_t flags); |
462 | int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 465 | int (*update_primary_plane)(struct drm_crtc *crtc, |
463 | int x, int y); | 466 | struct drm_framebuffer *fb, |
467 | int x, int y); | ||
464 | void (*hpd_irq_setup)(struct drm_device *dev); | 468 | void (*hpd_irq_setup)(struct drm_device *dev); |
465 | /* clock updates for mode set */ | 469 | /* clock updates for mode set */ |
466 | /* cursor updates */ | 470 | /* cursor updates */ |
@@ -721,6 +725,8 @@ struct i915_hw_ppgtt { | |||
721 | dma_addr_t *gen8_pt_dma_addr[4]; | 725 | dma_addr_t *gen8_pt_dma_addr[4]; |
722 | }; | 726 | }; |
723 | 727 | ||
728 | struct i915_hw_context *ctx; | ||
729 | |||
724 | int (*enable)(struct i915_hw_ppgtt *ppgtt); | 730 | int (*enable)(struct i915_hw_ppgtt *ppgtt); |
725 | int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, | 731 | int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, |
726 | struct intel_ring_buffer *ring, | 732 | struct intel_ring_buffer *ring, |
@@ -976,13 +982,24 @@ struct intel_gen6_power_mgmt { | |||
976 | struct work_struct work; | 982 | struct work_struct work; |
977 | u32 pm_iir; | 983 | u32 pm_iir; |
978 | 984 | ||
979 | u8 cur_delay; | 985 | /* Frequencies are stored in potentially platform dependent multiples. |
980 | u8 min_delay; | 986 | * In other words, *_freq needs to be multiplied by X to be interesting. |
981 | u8 max_delay; | 987 | * Soft limits are those which are used for the dynamic reclocking done |
982 | u8 rpe_delay; | 988 | * by the driver (raise frequencies under heavy loads, and lower for |
983 | u8 rp1_delay; | 989 | * lighter loads). Hard limits are those imposed by the hardware. |
984 | u8 rp0_delay; | 990 | * |
985 | u8 hw_max; | 991 | * A distinction is made for overclocking, which is never enabled by |
992 | * default, and is considered to be above the hard limit if it's | ||
993 | * possible at all. | ||
994 | */ | ||
995 | u8 cur_freq; /* Current frequency (cached, may not == HW) */ | ||
996 | u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ | ||
997 | u8 max_freq_softlimit; /* Max frequency permitted by the driver */ | ||
998 | u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ | ||
999 | u8 min_freq; /* AKA RPn. Minimum frequency */ | ||
1000 | u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ | ||
1001 | u8 rp1_freq; /* "less than" RP0 power/freqency */ | ||
1002 | u8 rp0_freq; /* Non-overclocked max frequency. */ | ||
986 | 1003 | ||
987 | bool rp_up_masked; | 1004 | bool rp_up_masked; |
988 | bool rp_down_masked; | 1005 | bool rp_down_masked; |
@@ -1333,43 +1350,19 @@ struct ilk_wm_values { | |||
1333 | }; | 1350 | }; |
1334 | 1351 | ||
1335 | /* | 1352 | /* |
1336 | * This struct tracks the state needed for the Package C8+ feature. | 1353 | * This struct helps tracking the state needed for runtime PM, which puts the |
1337 | * | 1354 | * device in PCI D3 state. Notice that when this happens, nothing on the |
1338 | * Package states C8 and deeper are really deep PC states that can only be | 1355 | * graphics device works, even register access, so we don't get interrupts nor |
1339 | * reached when all the devices on the system allow it, so even if the graphics | 1356 | * anything else. |
1340 | * device allows PC8+, it doesn't mean the system will actually get to these | ||
1341 | * states. | ||
1342 | * | ||
1343 | * Our driver only allows PC8+ when all the outputs are disabled, the power well | ||
1344 | * is disabled and the GPU is idle. When these conditions are met, we manually | ||
1345 | * do the other conditions: disable the interrupts, clocks and switch LCPLL | ||
1346 | * refclk to Fclk. | ||
1347 | * | 1357 | * |
1348 | * When we really reach PC8 or deeper states (not just when we allow it) we lose | 1358 | * Every piece of our code that needs to actually touch the hardware needs to |
1349 | * the state of some registers, so when we come back from PC8+ we need to | 1359 | * either call intel_runtime_pm_get or call intel_display_power_get with the |
1350 | * restore this state. We don't get into PC8+ if we're not in RC6, so we don't | 1360 | * appropriate power domain. |
1351 | * need to take care of the registers kept by RC6. | ||
1352 | * | 1361 | * |
1353 | * The interrupt disabling is part of the requirements. We can only leave the | 1362 | * Our driver uses the autosuspend delay feature, which means we'll only really |
1354 | * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we | 1363 | * suspend if we stay with zero refcount for a certain amount of time. The |
1355 | * can lock the machine. | 1364 | * default value is currently very conservative (see intel_init_runtime_pm), but |
1356 | * | 1365 | * it can be changed with the standard runtime PM files from sysfs. |
1357 | * Ideally every piece of our code that needs PC8+ disabled would call | ||
1358 | * hsw_disable_package_c8, which would increment disable_count and prevent the | ||
1359 | * system from reaching PC8+. But we don't have a symmetric way to do this for | ||
1360 | * everything, so we have the requirements_met variable. When we switch | ||
1361 | * requirements_met to true we decrease disable_count, and increase it in the | ||
1362 | * opposite case. The requirements_met variable is true when all the CRTCs, | ||
1363 | * encoders and the power well are disabled. | ||
1364 | * | ||
1365 | * In addition to everything, we only actually enable PC8+ if disable_count | ||
1366 | * stays at zero for at least some seconds. This is implemented with the | ||
1367 | * enable_work variable. We do this so we don't enable/disable PC8 dozens of | ||
1368 | * consecutive times when all screens are disabled and some background app | ||
1369 | * queries the state of our connectors, or we have some application constantly | ||
1370 | * waking up to use the GPU. Only after the enable_work function actually | ||
1371 | * enables PC8+ the "enable" variable will become true, which means that it can | ||
1372 | * be false even if disable_count is 0. | ||
1373 | * | 1366 | * |
1374 | * The irqs_disabled variable becomes true exactly after we disable the IRQs and | 1367 | * The irqs_disabled variable becomes true exactly after we disable the IRQs and |
1375 | * goes back to false exactly before we reenable the IRQs. We use this variable | 1368 | * goes back to false exactly before we reenable the IRQs. We use this variable |
@@ -1379,16 +1372,11 @@ struct ilk_wm_values { | |||
1379 | * inside struct regsave so when we restore the IRQs they will contain the | 1372 | * inside struct regsave so when we restore the IRQs they will contain the |
1380 | * latest expected values. | 1373 | * latest expected values. |
1381 | * | 1374 | * |
1382 | * For more, read "Display Sequences for Package C8" on our documentation. | 1375 | * For more, read the Documentation/power/runtime_pm.txt. |
1383 | */ | 1376 | */ |
1384 | struct i915_package_c8 { | 1377 | struct i915_runtime_pm { |
1385 | bool requirements_met; | 1378 | bool suspended; |
1386 | bool irqs_disabled; | 1379 | bool irqs_disabled; |
1387 | /* Only true after the delayed work task actually enables it. */ | ||
1388 | bool enabled; | ||
1389 | int disable_count; | ||
1390 | struct mutex lock; | ||
1391 | struct delayed_work enable_work; | ||
1392 | 1380 | ||
1393 | struct { | 1381 | struct { |
1394 | uint32_t deimr; | 1382 | uint32_t deimr; |
@@ -1399,10 +1387,6 @@ struct i915_package_c8 { | |||
1399 | } regsave; | 1387 | } regsave; |
1400 | }; | 1388 | }; |
1401 | 1389 | ||
1402 | struct i915_runtime_pm { | ||
1403 | bool suspended; | ||
1404 | }; | ||
1405 | |||
1406 | enum intel_pipe_crc_source { | 1390 | enum intel_pipe_crc_source { |
1407 | INTEL_PIPE_CRC_SOURCE_NONE, | 1391 | INTEL_PIPE_CRC_SOURCE_NONE, |
1408 | INTEL_PIPE_CRC_SOURCE_PLANE1, | 1392 | INTEL_PIPE_CRC_SOURCE_PLANE1, |
@@ -1610,6 +1594,7 @@ typedef struct drm_i915_private { | |||
1610 | 1594 | ||
1611 | u32 fdi_rx_config; | 1595 | u32 fdi_rx_config; |
1612 | 1596 | ||
1597 | u32 suspend_count; | ||
1613 | struct i915_suspend_saved_registers regfile; | 1598 | struct i915_suspend_saved_registers regfile; |
1614 | 1599 | ||
1615 | struct { | 1600 | struct { |
@@ -1629,8 +1614,6 @@ typedef struct drm_i915_private { | |||
1629 | struct ilk_wm_values hw; | 1614 | struct ilk_wm_values hw; |
1630 | } wm; | 1615 | } wm; |
1631 | 1616 | ||
1632 | struct i915_package_c8 pc8; | ||
1633 | |||
1634 | struct i915_runtime_pm pm; | 1617 | struct i915_runtime_pm pm; |
1635 | 1618 | ||
1636 | /* Old dri1 support infrastructure, beware the dragons ya fools entering | 1619 | /* Old dri1 support infrastructure, beware the dragons ya fools entering |
@@ -1638,8 +1621,6 @@ typedef struct drm_i915_private { | |||
1638 | struct i915_dri1_state dri1; | 1621 | struct i915_dri1_state dri1; |
1639 | /* Old ums support infrastructure, same warning applies. */ | 1622 | /* Old ums support infrastructure, same warning applies. */ |
1640 | struct i915_ums_state ums; | 1623 | struct i915_ums_state ums; |
1641 | |||
1642 | u32 suspend_count; | ||
1643 | } drm_i915_private_t; | 1624 | } drm_i915_private_t; |
1644 | 1625 | ||
1645 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) | 1626 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) |
@@ -2092,8 +2073,6 @@ struct i915_params { | |||
2092 | unsigned int preliminary_hw_support; | 2073 | unsigned int preliminary_hw_support; |
2093 | int disable_power_well; | 2074 | int disable_power_well; |
2094 | int enable_ips; | 2075 | int enable_ips; |
2095 | int enable_pc8; | ||
2096 | int pc8_timeout; | ||
2097 | int invert_brightness; | 2076 | int invert_brightness; |
2098 | int enable_cmd_parser; | 2077 | int enable_cmd_parser; |
2099 | /* leave bools at the end to not create holes */ | 2078 | /* leave bools at the end to not create holes */ |
@@ -2757,6 +2736,12 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); | |||
2757 | #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) | 2736 | #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) |
2758 | #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) | 2737 | #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) |
2759 | 2738 | ||
2739 | /* Be very careful with read/write 64-bit values. On 32-bit machines, they | ||
2740 | * will be implemented using 2 32-bit writes in an arbitrary order with | ||
2741 | * an arbitrary delay between them. This can cause the hardware to | ||
2742 | * act upon the intermediate value, possibly leading to corruption and | ||
2743 | * machine death. You have been warned. | ||
2744 | */ | ||
2760 | #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) | 2745 | #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) |
2761 | #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) | 2746 | #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) |
2762 | 2747 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9c52f68df66c..33bbaa0d4412 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -510,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev, | |||
510 | 510 | ||
511 | mutex_lock(&dev->struct_mutex); | 511 | mutex_lock(&dev->struct_mutex); |
512 | 512 | ||
513 | next_page: | ||
514 | mark_page_accessed(page); | ||
515 | |||
516 | if (ret) | 513 | if (ret) |
517 | goto out; | 514 | goto out; |
518 | 515 | ||
516 | next_page: | ||
519 | remain -= page_length; | 517 | remain -= page_length; |
520 | user_data += page_length; | 518 | user_data += page_length; |
521 | offset += page_length; | 519 | offset += page_length; |
@@ -695,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, | |||
695 | if (needs_clflush_before) | 693 | if (needs_clflush_before) |
696 | drm_clflush_virt_range(vaddr + shmem_page_offset, | 694 | drm_clflush_virt_range(vaddr + shmem_page_offset, |
697 | page_length); | 695 | page_length); |
698 | ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, | 696 | ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, |
699 | user_data, | 697 | user_data, page_length); |
700 | page_length); | ||
701 | if (needs_clflush_after) | 698 | if (needs_clflush_after) |
702 | drm_clflush_virt_range(vaddr + shmem_page_offset, | 699 | drm_clflush_virt_range(vaddr + shmem_page_offset, |
703 | page_length); | 700 | page_length); |
@@ -831,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev, | |||
831 | 828 | ||
832 | mutex_lock(&dev->struct_mutex); | 829 | mutex_lock(&dev->struct_mutex); |
833 | 830 | ||
834 | next_page: | ||
835 | set_page_dirty(page); | ||
836 | mark_page_accessed(page); | ||
837 | |||
838 | if (ret) | 831 | if (ret) |
839 | goto out; | 832 | goto out; |
840 | 833 | ||
834 | next_page: | ||
841 | remain -= page_length; | 835 | remain -= page_length; |
842 | user_data += page_length; | 836 | user_data += page_length; |
843 | offset += page_length; | 837 | offset += page_length; |
@@ -1041,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
1041 | unsigned long timeout_expire; | 1035 | unsigned long timeout_expire; |
1042 | int ret; | 1036 | int ret; |
1043 | 1037 | ||
1044 | WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); | 1038 | WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); |
1045 | 1039 | ||
1046 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | 1040 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
1047 | return 0; | 1041 | return 0; |
@@ -3473,7 +3467,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3473 | enum i915_cache_level cache_level) | 3467 | enum i915_cache_level cache_level) |
3474 | { | 3468 | { |
3475 | struct drm_device *dev = obj->base.dev; | 3469 | struct drm_device *dev = obj->base.dev; |
3476 | struct i915_vma *vma; | 3470 | struct i915_vma *vma, *next; |
3477 | int ret; | 3471 | int ret; |
3478 | 3472 | ||
3479 | if (obj->cache_level == cache_level) | 3473 | if (obj->cache_level == cache_level) |
@@ -3484,13 +3478,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3484 | return -EBUSY; | 3478 | return -EBUSY; |
3485 | } | 3479 | } |
3486 | 3480 | ||
3487 | list_for_each_entry(vma, &obj->vma_list, vma_link) { | 3481 | list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { |
3488 | if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { | 3482 | if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { |
3489 | ret = i915_vma_unbind(vma); | 3483 | ret = i915_vma_unbind(vma); |
3490 | if (ret) | 3484 | if (ret) |
3491 | return ret; | 3485 | return ret; |
3492 | |||
3493 | break; | ||
3494 | } | 3486 | } |
3495 | } | 3487 | } |
3496 | 3488 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index ce41cff84346..6043062ffce7 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -215,6 +215,7 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx) | |||
215 | return ERR_PTR(ret); | 215 | return ERR_PTR(ret); |
216 | } | 216 | } |
217 | 217 | ||
218 | ppgtt->ctx = ctx; | ||
218 | return ppgtt; | 219 | return ppgtt; |
219 | } | 220 | } |
220 | 221 | ||
@@ -775,9 +776,11 @@ int i915_switch_context(struct intel_ring_buffer *ring, | |||
775 | 776 | ||
776 | BUG_ON(file && to == NULL); | 777 | BUG_ON(file && to == NULL); |
777 | 778 | ||
778 | /* We have the fake context, but don't supports switching. */ | 779 | /* We have the fake context */ |
779 | if (!HAS_HW_CONTEXTS(ring->dev)) | 780 | if (!HAS_HW_CONTEXTS(ring->dev)) { |
781 | ring->last_context = to; | ||
780 | return 0; | 782 | return 0; |
783 | } | ||
781 | 784 | ||
782 | return do_switch(ring, to); | 785 | return do_switch(ring, to); |
783 | } | 786 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 63a6dc7a6bb6..ee535514aa41 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "i915_trace.h" | 30 | #include "i915_trace.h" |
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv); | ||
34 | |||
33 | bool intel_enable_ppgtt(struct drm_device *dev, bool full) | 35 | bool intel_enable_ppgtt(struct drm_device *dev, bool full) |
34 | { | 36 | { |
35 | if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) | 37 | if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) |
@@ -1191,9 +1193,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
1191 | ppgtt->base.clear_range = gen6_ppgtt_clear_range; | 1193 | ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
1192 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; | 1194 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
1193 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; | 1195 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
1194 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; | ||
1195 | ppgtt->base.start = 0; | 1196 | ppgtt->base.start = 0; |
1196 | ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; | 1197 | ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; |
1197 | ppgtt->debug_dump = gen6_dump_ppgtt; | 1198 | ppgtt->debug_dump = gen6_dump_ppgtt; |
1198 | 1199 | ||
1199 | ppgtt->pd_offset = | 1200 | ppgtt->pd_offset = |
@@ -1214,6 +1215,7 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | |||
1214 | int ret = 0; | 1215 | int ret = 0; |
1215 | 1216 | ||
1216 | ppgtt->base.dev = dev; | 1217 | ppgtt->base.dev = dev; |
1218 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; | ||
1217 | 1219 | ||
1218 | if (INTEL_INFO(dev)->gen < 8) | 1220 | if (INTEL_INFO(dev)->gen < 8) |
1219 | ret = gen6_ppgtt_init(ppgtt); | 1221 | ret = gen6_ppgtt_init(ppgtt); |
@@ -1243,8 +1245,6 @@ ppgtt_bind_vma(struct i915_vma *vma, | |||
1243 | enum i915_cache_level cache_level, | 1245 | enum i915_cache_level cache_level, |
1244 | u32 flags) | 1246 | u32 flags) |
1245 | { | 1247 | { |
1246 | WARN_ON(flags); | ||
1247 | |||
1248 | vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, | 1248 | vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, |
1249 | cache_level); | 1249 | cache_level); |
1250 | } | 1250 | } |
@@ -1372,8 +1372,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | 1374 | ||
1375 | if (INTEL_INFO(dev)->gen >= 8) | 1375 | if (INTEL_INFO(dev)->gen >= 8) { |
1376 | gen8_setup_private_ppat(dev_priv); | ||
1376 | return; | 1377 | return; |
1378 | } | ||
1377 | 1379 | ||
1378 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { | 1380 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
1379 | /* TODO: Perhaps it shouldn't be gen6 specific */ | 1381 | /* TODO: Perhaps it shouldn't be gen6 specific */ |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 144a5e2bc7ef..baf1ca690dc5 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -850,10 +850,12 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
850 | } | 850 | } |
851 | break; | 851 | break; |
852 | case 7: | 852 | case 7: |
853 | ering->vm_info.pp_dir_base = RING_PP_DIR_BASE(ring); | 853 | ering->vm_info.pp_dir_base = |
854 | I915_READ(RING_PP_DIR_BASE(ring)); | ||
854 | break; | 855 | break; |
855 | case 6: | 856 | case 6: |
856 | ering->vm_info.pp_dir_base = RING_PP_DIR_BASE_READ(ring); | 857 | ering->vm_info.pp_dir_base = |
858 | I915_READ(RING_PP_DIR_BASE_READ(ring)); | ||
857 | break; | 859 | break; |
858 | } | 860 | } |
859 | } | 861 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 15d5e61eb415..acebe511e4ef 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -86,9 +86,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
86 | { | 86 | { |
87 | assert_spin_locked(&dev_priv->irq_lock); | 87 | assert_spin_locked(&dev_priv->irq_lock); |
88 | 88 | ||
89 | if (dev_priv->pc8.irqs_disabled) { | 89 | if (dev_priv->pm.irqs_disabled) { |
90 | WARN(1, "IRQs disabled\n"); | 90 | WARN(1, "IRQs disabled\n"); |
91 | dev_priv->pc8.regsave.deimr &= ~mask; | 91 | dev_priv->pm.regsave.deimr &= ~mask; |
92 | return; | 92 | return; |
93 | } | 93 | } |
94 | 94 | ||
@@ -104,9 +104,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
104 | { | 104 | { |
105 | assert_spin_locked(&dev_priv->irq_lock); | 105 | assert_spin_locked(&dev_priv->irq_lock); |
106 | 106 | ||
107 | if (dev_priv->pc8.irqs_disabled) { | 107 | if (dev_priv->pm.irqs_disabled) { |
108 | WARN(1, "IRQs disabled\n"); | 108 | WARN(1, "IRQs disabled\n"); |
109 | dev_priv->pc8.regsave.deimr |= mask; | 109 | dev_priv->pm.regsave.deimr |= mask; |
110 | return; | 110 | return; |
111 | } | 111 | } |
112 | 112 | ||
@@ -129,10 +129,10 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, | |||
129 | { | 129 | { |
130 | assert_spin_locked(&dev_priv->irq_lock); | 130 | assert_spin_locked(&dev_priv->irq_lock); |
131 | 131 | ||
132 | if (dev_priv->pc8.irqs_disabled) { | 132 | if (dev_priv->pm.irqs_disabled) { |
133 | WARN(1, "IRQs disabled\n"); | 133 | WARN(1, "IRQs disabled\n"); |
134 | dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; | 134 | dev_priv->pm.regsave.gtimr &= ~interrupt_mask; |
135 | dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & | 135 | dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask & |
136 | interrupt_mask); | 136 | interrupt_mask); |
137 | return; | 137 | return; |
138 | } | 138 | } |
@@ -167,10 +167,10 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, | |||
167 | 167 | ||
168 | assert_spin_locked(&dev_priv->irq_lock); | 168 | assert_spin_locked(&dev_priv->irq_lock); |
169 | 169 | ||
170 | if (dev_priv->pc8.irqs_disabled) { | 170 | if (dev_priv->pm.irqs_disabled) { |
171 | WARN(1, "IRQs disabled\n"); | 171 | WARN(1, "IRQs disabled\n"); |
172 | dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; | 172 | dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask; |
173 | dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & | 173 | dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask & |
174 | interrupt_mask); | 174 | interrupt_mask); |
175 | return; | 175 | return; |
176 | } | 176 | } |
@@ -313,11 +313,11 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, | |||
313 | 313 | ||
314 | assert_spin_locked(&dev_priv->irq_lock); | 314 | assert_spin_locked(&dev_priv->irq_lock); |
315 | 315 | ||
316 | if (dev_priv->pc8.irqs_disabled && | 316 | if (dev_priv->pm.irqs_disabled && |
317 | (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { | 317 | (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { |
318 | WARN(1, "IRQs disabled\n"); | 318 | WARN(1, "IRQs disabled\n"); |
319 | dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; | 319 | dev_priv->pm.regsave.sdeimr &= ~interrupt_mask; |
320 | dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & | 320 | dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask & |
321 | interrupt_mask); | 321 | interrupt_mask); |
322 | return; | 322 | return; |
323 | } | 323 | } |
@@ -1075,7 +1075,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, | |||
1075 | u32 pm_iir, int new_delay) | 1075 | u32 pm_iir, int new_delay) |
1076 | { | 1076 | { |
1077 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { | 1077 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { |
1078 | if (new_delay >= dev_priv->rps.max_delay) { | 1078 | if (new_delay >= dev_priv->rps.max_freq_softlimit) { |
1079 | /* Mask UP THRESHOLD Interrupts */ | 1079 | /* Mask UP THRESHOLD Interrupts */ |
1080 | I915_WRITE(GEN6_PMINTRMSK, | 1080 | I915_WRITE(GEN6_PMINTRMSK, |
1081 | I915_READ(GEN6_PMINTRMSK) | | 1081 | I915_READ(GEN6_PMINTRMSK) | |
@@ -1090,7 +1090,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, | |||
1090 | dev_priv->rps.rp_down_masked = false; | 1090 | dev_priv->rps.rp_down_masked = false; |
1091 | } | 1091 | } |
1092 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { | 1092 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { |
1093 | if (new_delay <= dev_priv->rps.min_delay) { | 1093 | if (new_delay <= dev_priv->rps.min_freq_softlimit) { |
1094 | /* Mask DOWN THRESHOLD Interrupts */ | 1094 | /* Mask DOWN THRESHOLD Interrupts */ |
1095 | I915_WRITE(GEN6_PMINTRMSK, | 1095 | I915_WRITE(GEN6_PMINTRMSK, |
1096 | I915_READ(GEN6_PMINTRMSK) | | 1096 | I915_READ(GEN6_PMINTRMSK) | |
@@ -1136,38 +1136,39 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1136 | adj *= 2; | 1136 | adj *= 2; |
1137 | else | 1137 | else |
1138 | adj = 1; | 1138 | adj = 1; |
1139 | new_delay = dev_priv->rps.cur_delay + adj; | 1139 | new_delay = dev_priv->rps.cur_freq + adj; |
1140 | 1140 | ||
1141 | /* | 1141 | /* |
1142 | * For better performance, jump directly | 1142 | * For better performance, jump directly |
1143 | * to RPe if we're below it. | 1143 | * to RPe if we're below it. |
1144 | */ | 1144 | */ |
1145 | if (new_delay < dev_priv->rps.rpe_delay) | 1145 | if (new_delay < dev_priv->rps.efficient_freq) |
1146 | new_delay = dev_priv->rps.rpe_delay; | 1146 | new_delay = dev_priv->rps.efficient_freq; |
1147 | } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { | 1147 | } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { |
1148 | if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) | 1148 | if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) |
1149 | new_delay = dev_priv->rps.rpe_delay; | 1149 | new_delay = dev_priv->rps.efficient_freq; |
1150 | else | 1150 | else |
1151 | new_delay = dev_priv->rps.min_delay; | 1151 | new_delay = dev_priv->rps.min_freq_softlimit; |
1152 | adj = 0; | 1152 | adj = 0; |
1153 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { | 1153 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { |
1154 | if (adj < 0) | 1154 | if (adj < 0) |
1155 | adj *= 2; | 1155 | adj *= 2; |
1156 | else | 1156 | else |
1157 | adj = -1; | 1157 | adj = -1; |
1158 | new_delay = dev_priv->rps.cur_delay + adj; | 1158 | new_delay = dev_priv->rps.cur_freq + adj; |
1159 | } else { /* unknown event */ | 1159 | } else { /* unknown event */ |
1160 | new_delay = dev_priv->rps.cur_delay; | 1160 | new_delay = dev_priv->rps.cur_freq; |
1161 | } | 1161 | } |
1162 | 1162 | ||
1163 | /* sysfs frequency interfaces may have snuck in while servicing the | 1163 | /* sysfs frequency interfaces may have snuck in while servicing the |
1164 | * interrupt | 1164 | * interrupt |
1165 | */ | 1165 | */ |
1166 | new_delay = clamp_t(int, new_delay, | 1166 | new_delay = clamp_t(int, new_delay, |
1167 | dev_priv->rps.min_delay, dev_priv->rps.max_delay); | 1167 | dev_priv->rps.min_freq_softlimit, |
1168 | dev_priv->rps.max_freq_softlimit); | ||
1168 | 1169 | ||
1169 | gen6_set_pm_mask(dev_priv, pm_iir, new_delay); | 1170 | gen6_set_pm_mask(dev_priv, pm_iir, new_delay); |
1170 | dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; | 1171 | dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; |
1171 | 1172 | ||
1172 | if (IS_VALLEYVIEW(dev_priv->dev)) | 1173 | if (IS_VALLEYVIEW(dev_priv->dev)) |
1173 | valleyview_set_rps(dev_priv->dev, new_delay); | 1174 | valleyview_set_rps(dev_priv->dev, new_delay); |
@@ -3074,7 +3075,7 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) | |||
3074 | 3075 | ||
3075 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | 3076 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | |
3076 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 3077 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
3077 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | 3078 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; |
3078 | 3079 | ||
3079 | dev_priv->irq_mask |= iir_mask; | 3080 | dev_priv->irq_mask |= iir_mask; |
3080 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | 3081 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); |
@@ -4118,32 +4119,32 @@ void intel_hpd_init(struct drm_device *dev) | |||
4118 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4119 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4119 | } | 4120 | } |
4120 | 4121 | ||
4121 | /* Disable interrupts so we can allow Package C8+. */ | 4122 | /* Disable interrupts so we can allow runtime PM. */ |
4122 | void hsw_pc8_disable_interrupts(struct drm_device *dev) | 4123 | void hsw_runtime_pm_disable_interrupts(struct drm_device *dev) |
4123 | { | 4124 | { |
4124 | struct drm_i915_private *dev_priv = dev->dev_private; | 4125 | struct drm_i915_private *dev_priv = dev->dev_private; |
4125 | unsigned long irqflags; | 4126 | unsigned long irqflags; |
4126 | 4127 | ||
4127 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 4128 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
4128 | 4129 | ||
4129 | dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); | 4130 | dev_priv->pm.regsave.deimr = I915_READ(DEIMR); |
4130 | dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); | 4131 | dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR); |
4131 | dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); | 4132 | dev_priv->pm.regsave.gtimr = I915_READ(GTIMR); |
4132 | dev_priv->pc8.regsave.gtier = I915_READ(GTIER); | 4133 | dev_priv->pm.regsave.gtier = I915_READ(GTIER); |
4133 | dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); | 4134 | dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); |
4134 | 4135 | ||
4135 | ironlake_disable_display_irq(dev_priv, 0xffffffff); | 4136 | ironlake_disable_display_irq(dev_priv, 0xffffffff); |
4136 | ibx_disable_display_interrupt(dev_priv, 0xffffffff); | 4137 | ibx_disable_display_interrupt(dev_priv, 0xffffffff); |
4137 | ilk_disable_gt_irq(dev_priv, 0xffffffff); | 4138 | ilk_disable_gt_irq(dev_priv, 0xffffffff); |
4138 | snb_disable_pm_irq(dev_priv, 0xffffffff); | 4139 | snb_disable_pm_irq(dev_priv, 0xffffffff); |
4139 | 4140 | ||
4140 | dev_priv->pc8.irqs_disabled = true; | 4141 | dev_priv->pm.irqs_disabled = true; |
4141 | 4142 | ||
4142 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4143 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4143 | } | 4144 | } |
4144 | 4145 | ||
4145 | /* Restore interrupts so we can recover from Package C8+. */ | 4146 | /* Restore interrupts so we can recover from runtime PM. */ |
4146 | void hsw_pc8_restore_interrupts(struct drm_device *dev) | 4147 | void hsw_runtime_pm_restore_interrupts(struct drm_device *dev) |
4147 | { | 4148 | { |
4148 | struct drm_i915_private *dev_priv = dev->dev_private; | 4149 | struct drm_i915_private *dev_priv = dev->dev_private; |
4149 | unsigned long irqflags; | 4150 | unsigned long irqflags; |
@@ -4163,13 +4164,13 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev) | |||
4163 | val = I915_READ(GEN6_PMIMR); | 4164 | val = I915_READ(GEN6_PMIMR); |
4164 | WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); | 4165 | WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); |
4165 | 4166 | ||
4166 | dev_priv->pc8.irqs_disabled = false; | 4167 | dev_priv->pm.irqs_disabled = false; |
4167 | 4168 | ||
4168 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); | 4169 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr); |
4169 | ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr); | 4170 | ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr); |
4170 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); | 4171 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr); |
4171 | snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); | 4172 | snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr); |
4172 | I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); | 4173 | I915_WRITE(GTIER, dev_priv->pm.regsave.gtier); |
4173 | 4174 | ||
4174 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4175 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4175 | } | 4176 | } |
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index a66ffb652bee..d1d7980f0e01 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
@@ -42,8 +42,6 @@ struct i915_params i915 __read_mostly = { | |||
42 | .disable_power_well = 1, | 42 | .disable_power_well = 1, |
43 | .enable_ips = 1, | 43 | .enable_ips = 1, |
44 | .fastboot = 0, | 44 | .fastboot = 0, |
45 | .enable_pc8 = 1, | ||
46 | .pc8_timeout = 5000, | ||
47 | .prefault_disable = 0, | 45 | .prefault_disable = 0, |
48 | .reset = true, | 46 | .reset = true, |
49 | .invert_brightness = 0, | 47 | .invert_brightness = 0, |
@@ -135,14 +133,6 @@ module_param_named(fastboot, i915.fastboot, bool, 0600); | |||
135 | MODULE_PARM_DESC(fastboot, | 133 | MODULE_PARM_DESC(fastboot, |
136 | "Try to skip unnecessary mode sets at boot time (default: false)"); | 134 | "Try to skip unnecessary mode sets at boot time (default: false)"); |
137 | 135 | ||
138 | module_param_named(enable_pc8, i915.enable_pc8, int, 0600); | ||
139 | MODULE_PARM_DESC(enable_pc8, | ||
140 | "Enable support for low power package C states (PC8+) (default: true)"); | ||
141 | |||
142 | module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600); | ||
143 | MODULE_PARM_DESC(pc8_timeout, | ||
144 | "Number of msecs of idleness required to enter PC8+ (default: 5000)"); | ||
145 | |||
146 | module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); | 136 | module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); |
147 | MODULE_PARM_DESC(prefault_disable, | 137 | MODULE_PARM_DESC(prefault_disable, |
148 | "Disable page prefaulting for pread/pwrite/reloc (default:false). " | 138 | "Disable page prefaulting for pread/pwrite/reloc (default:false). " |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 146609ab42bb..74f7d853eb58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -748,6 +748,7 @@ enum punit_power_well { | |||
748 | #define RING_INSTPS(base) ((base)+0x70) | 748 | #define RING_INSTPS(base) ((base)+0x70) |
749 | #define RING_DMA_FADD(base) ((base)+0x78) | 749 | #define RING_DMA_FADD(base) ((base)+0x78) |
750 | #define RING_INSTPM(base) ((base)+0xc0) | 750 | #define RING_INSTPM(base) ((base)+0xc0) |
751 | #define RING_MI_MODE(base) ((base)+0x9c) | ||
751 | #define INSTPS 0x02070 /* 965+ only */ | 752 | #define INSTPS 0x02070 /* 965+ only */ |
752 | #define INSTDONE1 0x0207c /* 965+ only */ | 753 | #define INSTDONE1 0x0207c /* 965+ only */ |
753 | #define ACTHD_I965 0x02074 | 754 | #define ACTHD_I965 0x02074 |
@@ -824,6 +825,7 @@ enum punit_power_well { | |||
824 | # define VS_TIMER_DISPATCH (1 << 6) | 825 | # define VS_TIMER_DISPATCH (1 << 6) |
825 | # define MI_FLUSH_ENABLE (1 << 12) | 826 | # define MI_FLUSH_ENABLE (1 << 12) |
826 | # define ASYNC_FLIP_PERF_DISABLE (1 << 14) | 827 | # define ASYNC_FLIP_PERF_DISABLE (1 << 14) |
828 | # define MODE_IDLE (1 << 9) | ||
827 | 829 | ||
828 | #define GEN6_GT_MODE 0x20d0 | 830 | #define GEN6_GT_MODE 0x20d0 |
829 | #define GEN7_GT_MODE 0x7008 | 831 | #define GEN7_GT_MODE 0x7008 |
@@ -3551,7 +3553,11 @@ enum punit_power_well { | |||
3551 | /* New style CUR*CNTR flags */ | 3553 | /* New style CUR*CNTR flags */ |
3552 | #define CURSOR_MODE 0x27 | 3554 | #define CURSOR_MODE 0x27 |
3553 | #define CURSOR_MODE_DISABLE 0x00 | 3555 | #define CURSOR_MODE_DISABLE 0x00 |
3556 | #define CURSOR_MODE_128_32B_AX 0x02 | ||
3557 | #define CURSOR_MODE_256_32B_AX 0x03 | ||
3554 | #define CURSOR_MODE_64_32B_AX 0x07 | 3558 | #define CURSOR_MODE_64_32B_AX 0x07 |
3559 | #define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX) | ||
3560 | #define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX) | ||
3555 | #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) | 3561 | #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) |
3556 | #define MCURSOR_PIPE_SELECT (1 << 28) | 3562 | #define MCURSOR_PIPE_SELECT (1 << 28) |
3557 | #define MCURSOR_PIPE_A 0x00 | 3563 | #define MCURSOR_PIPE_A 0x00 |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 0c741f4eefb0..9c57029f6f4b 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, | |||
269 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | 269 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
270 | ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); | 270 | ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); |
271 | } else { | 271 | } else { |
272 | ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; | 272 | ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER; |
273 | } | 273 | } |
274 | mutex_unlock(&dev_priv->rps.hw_lock); | 274 | mutex_unlock(&dev_priv->rps.hw_lock); |
275 | 275 | ||
@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, | |||
284 | struct drm_i915_private *dev_priv = dev->dev_private; | 284 | struct drm_i915_private *dev_priv = dev->dev_private; |
285 | 285 | ||
286 | return snprintf(buf, PAGE_SIZE, "%d\n", | 286 | return snprintf(buf, PAGE_SIZE, "%d\n", |
287 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay)); | 287 | vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); |
288 | } | 288 | } |
289 | 289 | ||
290 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) | 290 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute | |||
298 | 298 | ||
299 | mutex_lock(&dev_priv->rps.hw_lock); | 299 | mutex_lock(&dev_priv->rps.hw_lock); |
300 | if (IS_VALLEYVIEW(dev_priv->dev)) | 300 | if (IS_VALLEYVIEW(dev_priv->dev)) |
301 | ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); | 301 | ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); |
302 | else | 302 | else |
303 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | 303 | ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; |
304 | mutex_unlock(&dev_priv->rps.hw_lock); | 304 | mutex_unlock(&dev_priv->rps.hw_lock); |
305 | 305 | ||
306 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); | 306 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
@@ -313,7 +313,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
313 | struct drm_minor *minor = dev_to_drm_minor(kdev); | 313 | struct drm_minor *minor = dev_to_drm_minor(kdev); |
314 | struct drm_device *dev = minor->dev; | 314 | struct drm_device *dev = minor->dev; |
315 | struct drm_i915_private *dev_priv = dev->dev_private; | 315 | struct drm_i915_private *dev_priv = dev->dev_private; |
316 | u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; | 316 | u32 val; |
317 | ssize_t ret; | 317 | ssize_t ret; |
318 | 318 | ||
319 | ret = kstrtou32(buf, 0, &val); | 319 | ret = kstrtou32(buf, 0, &val); |
@@ -324,44 +324,35 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
324 | 324 | ||
325 | mutex_lock(&dev_priv->rps.hw_lock); | 325 | mutex_lock(&dev_priv->rps.hw_lock); |
326 | 326 | ||
327 | if (IS_VALLEYVIEW(dev_priv->dev)) { | 327 | if (IS_VALLEYVIEW(dev_priv->dev)) |
328 | val = vlv_freq_opcode(dev_priv, val); | 328 | val = vlv_freq_opcode(dev_priv, val); |
329 | 329 | else | |
330 | hw_max = valleyview_rps_max_freq(dev_priv); | ||
331 | hw_min = valleyview_rps_min_freq(dev_priv); | ||
332 | non_oc_max = hw_max; | ||
333 | } else { | ||
334 | val /= GT_FREQUENCY_MULTIPLIER; | 330 | val /= GT_FREQUENCY_MULTIPLIER; |
335 | 331 | ||
336 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 332 | if (val < dev_priv->rps.min_freq || |
337 | hw_max = dev_priv->rps.hw_max; | 333 | val > dev_priv->rps.max_freq || |
338 | non_oc_max = (rp_state_cap & 0xff); | 334 | val < dev_priv->rps.min_freq_softlimit) { |
339 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | ||
340 | } | ||
341 | |||
342 | if (val < hw_min || val > hw_max || | ||
343 | val < dev_priv->rps.min_delay) { | ||
344 | mutex_unlock(&dev_priv->rps.hw_lock); | 335 | mutex_unlock(&dev_priv->rps.hw_lock); |
345 | return -EINVAL; | 336 | return -EINVAL; |
346 | } | 337 | } |
347 | 338 | ||
348 | if (val > non_oc_max) | 339 | if (val > dev_priv->rps.rp0_freq) |
349 | DRM_DEBUG("User requested overclocking to %d\n", | 340 | DRM_DEBUG("User requested overclocking to %d\n", |
350 | val * GT_FREQUENCY_MULTIPLIER); | 341 | val * GT_FREQUENCY_MULTIPLIER); |
351 | 342 | ||
352 | dev_priv->rps.max_delay = val; | 343 | dev_priv->rps.max_freq_softlimit = val; |
353 | 344 | ||
354 | if (dev_priv->rps.cur_delay > val) { | 345 | if (dev_priv->rps.cur_freq > val) { |
355 | if (IS_VALLEYVIEW(dev)) | 346 | if (IS_VALLEYVIEW(dev)) |
356 | valleyview_set_rps(dev, val); | 347 | valleyview_set_rps(dev, val); |
357 | else | 348 | else |
358 | gen6_set_rps(dev, val); | 349 | gen6_set_rps(dev, val); |
350 | } else if (!IS_VALLEYVIEW(dev)) { | ||
351 | /* We still need gen6_set_rps to process the new max_delay and | ||
352 | * update the interrupt limits even though frequency request is | ||
353 | * unchanged. */ | ||
354 | gen6_set_rps(dev, dev_priv->rps.cur_freq); | ||
359 | } | 355 | } |
360 | else if (!IS_VALLEYVIEW(dev)) | ||
361 | /* We still need gen6_set_rps to process the new max_delay | ||
362 | and update the interrupt limits even though frequency | ||
363 | request is unchanged. */ | ||
364 | gen6_set_rps(dev, dev_priv->rps.cur_delay); | ||
365 | 356 | ||
366 | mutex_unlock(&dev_priv->rps.hw_lock); | 357 | mutex_unlock(&dev_priv->rps.hw_lock); |
367 | 358 | ||
@@ -379,9 +370,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute | |||
379 | 370 | ||
380 | mutex_lock(&dev_priv->rps.hw_lock); | 371 | mutex_lock(&dev_priv->rps.hw_lock); |
381 | if (IS_VALLEYVIEW(dev_priv->dev)) | 372 | if (IS_VALLEYVIEW(dev_priv->dev)) |
382 | ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); | 373 | ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); |
383 | else | 374 | else |
384 | ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; | 375 | ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; |
385 | mutex_unlock(&dev_priv->rps.hw_lock); | 376 | mutex_unlock(&dev_priv->rps.hw_lock); |
386 | 377 | ||
387 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); | 378 | return snprintf(buf, PAGE_SIZE, "%d\n", ret); |
@@ -394,7 +385,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
394 | struct drm_minor *minor = dev_to_drm_minor(kdev); | 385 | struct drm_minor *minor = dev_to_drm_minor(kdev); |
395 | struct drm_device *dev = minor->dev; | 386 | struct drm_device *dev = minor->dev; |
396 | struct drm_i915_private *dev_priv = dev->dev_private; | 387 | struct drm_i915_private *dev_priv = dev->dev_private; |
397 | u32 val, rp_state_cap, hw_max, hw_min; | 388 | u32 val; |
398 | ssize_t ret; | 389 | ssize_t ret; |
399 | 390 | ||
400 | ret = kstrtou32(buf, 0, &val); | 391 | ret = kstrtou32(buf, 0, &val); |
@@ -405,37 +396,31 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
405 | 396 | ||
406 | mutex_lock(&dev_priv->rps.hw_lock); | 397 | mutex_lock(&dev_priv->rps.hw_lock); |
407 | 398 | ||
408 | if (IS_VALLEYVIEW(dev)) { | 399 | if (IS_VALLEYVIEW(dev)) |
409 | val = vlv_freq_opcode(dev_priv, val); | 400 | val = vlv_freq_opcode(dev_priv, val); |
410 | 401 | else | |
411 | hw_max = valleyview_rps_max_freq(dev_priv); | ||
412 | hw_min = valleyview_rps_min_freq(dev_priv); | ||
413 | } else { | ||
414 | val /= GT_FREQUENCY_MULTIPLIER; | 402 | val /= GT_FREQUENCY_MULTIPLIER; |
415 | 403 | ||
416 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 404 | if (val < dev_priv->rps.min_freq || |
417 | hw_max = dev_priv->rps.hw_max; | 405 | val > dev_priv->rps.max_freq || |
418 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | 406 | val > dev_priv->rps.max_freq_softlimit) { |
419 | } | ||
420 | |||
421 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { | ||
422 | mutex_unlock(&dev_priv->rps.hw_lock); | 407 | mutex_unlock(&dev_priv->rps.hw_lock); |
423 | return -EINVAL; | 408 | return -EINVAL; |
424 | } | 409 | } |
425 | 410 | ||
426 | dev_priv->rps.min_delay = val; | 411 | dev_priv->rps.min_freq_softlimit = val; |
427 | 412 | ||
428 | if (dev_priv->rps.cur_delay < val) { | 413 | if (dev_priv->rps.cur_freq < val) { |
429 | if (IS_VALLEYVIEW(dev)) | 414 | if (IS_VALLEYVIEW(dev)) |
430 | valleyview_set_rps(dev, val); | 415 | valleyview_set_rps(dev, val); |
431 | else | 416 | else |
432 | gen6_set_rps(dev, val); | 417 | gen6_set_rps(dev, val); |
418 | } else if (!IS_VALLEYVIEW(dev)) { | ||
419 | /* We still need gen6_set_rps to process the new min_delay and | ||
420 | * update the interrupt limits even though frequency request is | ||
421 | * unchanged. */ | ||
422 | gen6_set_rps(dev, dev_priv->rps.cur_freq); | ||
433 | } | 423 | } |
434 | else if (!IS_VALLEYVIEW(dev)) | ||
435 | /* We still need gen6_set_rps to process the new min_delay | ||
436 | and update the interrupt limits even though frequency | ||
437 | request is unchanged. */ | ||
438 | gen6_set_rps(dev, dev_priv->rps.cur_delay); | ||
439 | 424 | ||
440 | mutex_unlock(&dev_priv->rps.hw_lock); | 425 | mutex_unlock(&dev_priv->rps.hw_lock); |
441 | 426 | ||
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b95a380958db..23c26f1f8b37 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm, | |||
238 | TP_ARGS(vm), | 238 | TP_ARGS(vm), |
239 | 239 | ||
240 | TP_STRUCT__entry( | 240 | TP_STRUCT__entry( |
241 | __field(u32, dev) | ||
241 | __field(struct i915_address_space *, vm) | 242 | __field(struct i915_address_space *, vm) |
242 | ), | 243 | ), |
243 | 244 | ||
244 | TP_fast_assign( | 245 | TP_fast_assign( |
246 | __entry->dev = vm->dev->primary->index; | ||
245 | __entry->vm = vm; | 247 | __entry->vm = vm; |
246 | ), | 248 | ), |
247 | 249 | ||
248 | TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm) | 250 | TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) |
249 | ); | 251 | ); |
250 | 252 | ||
251 | TRACE_EVENT(i915_gem_ring_sync_to, | 253 | TRACE_EVENT(i915_gem_ring_sync_to, |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4ef6d69c078d..4b4e8f0f9621 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev) | |||
839 | intel_connector_attach_encoder(intel_connector, &crt->base); | 839 | intel_connector_attach_encoder(intel_connector, &crt->base); |
840 | 840 | ||
841 | crt->base.type = INTEL_OUTPUT_ANALOG; | 841 | crt->base.type = INTEL_OUTPUT_ANALOG; |
842 | crt->base.cloneable = true; | 842 | crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI); |
843 | if (IS_I830(dev)) | 843 | if (IS_I830(dev)) |
844 | crt->base.crtc_mask = (1 << 0); | 844 | crt->base.crtc_mask = (1 << 0); |
845 | else | 845 | else |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e2665e09d5df..070bf2e78d61 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -1340,6 +1340,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
1340 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 1340 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
1341 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1341 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1342 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1342 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
1343 | intel_edp_panel_vdd_on(intel_dp); | ||
1343 | intel_edp_panel_off(intel_dp); | 1344 | intel_edp_panel_off(intel_dp); |
1344 | } | 1345 | } |
1345 | 1346 | ||
@@ -1717,7 +1718,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
1717 | 1718 | ||
1718 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; | 1719 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
1719 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 1720 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
1720 | intel_encoder->cloneable = false; | 1721 | intel_encoder->cloneable = 0; |
1721 | intel_encoder->hot_plug = intel_ddi_hot_plug; | 1722 | intel_encoder->hot_plug = intel_ddi_hot_plug; |
1722 | 1723 | ||
1723 | if (init_dp) | 1724 | if (init_dp) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3ffe5a6124f1..6332383abae9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -741,7 +741,7 @@ bool intel_crtc_active(struct drm_crtc *crtc) | |||
741 | * We can ditch the adjusted_mode.crtc_clock check as soon | 741 | * We can ditch the adjusted_mode.crtc_clock check as soon |
742 | * as Haswell has gained clock readout/fastboot support. | 742 | * as Haswell has gained clock readout/fastboot support. |
743 | * | 743 | * |
744 | * We can ditch the crtc->fb check as soon as we can | 744 | * We can ditch the crtc->primary->fb check as soon as we can |
745 | * properly reconstruct framebuffers. | 745 | * properly reconstruct framebuffers. |
746 | */ | 746 | */ |
747 | return intel_crtc->active && crtc->primary->fb && | 747 | return intel_crtc->active && crtc->primary->fb && |
@@ -1166,7 +1166,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |||
1166 | if (INTEL_INFO(dev)->gen >= 4) { | 1166 | if (INTEL_INFO(dev)->gen >= 4) { |
1167 | reg = DSPCNTR(pipe); | 1167 | reg = DSPCNTR(pipe); |
1168 | val = I915_READ(reg); | 1168 | val = I915_READ(reg); |
1169 | WARN((val & DISPLAY_PLANE_ENABLE), | 1169 | WARN(val & DISPLAY_PLANE_ENABLE, |
1170 | "plane %c assertion failure, should be disabled but not\n", | 1170 | "plane %c assertion failure, should be disabled but not\n", |
1171 | plane_name(pipe)); | 1171 | plane_name(pipe)); |
1172 | return; | 1172 | return; |
@@ -1195,20 +1195,20 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1195 | for_each_sprite(pipe, sprite) { | 1195 | for_each_sprite(pipe, sprite) { |
1196 | reg = SPCNTR(pipe, sprite); | 1196 | reg = SPCNTR(pipe, sprite); |
1197 | val = I915_READ(reg); | 1197 | val = I915_READ(reg); |
1198 | WARN((val & SP_ENABLE), | 1198 | WARN(val & SP_ENABLE, |
1199 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1199 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1200 | sprite_name(pipe, sprite), pipe_name(pipe)); | 1200 | sprite_name(pipe, sprite), pipe_name(pipe)); |
1201 | } | 1201 | } |
1202 | } else if (INTEL_INFO(dev)->gen >= 7) { | 1202 | } else if (INTEL_INFO(dev)->gen >= 7) { |
1203 | reg = SPRCTL(pipe); | 1203 | reg = SPRCTL(pipe); |
1204 | val = I915_READ(reg); | 1204 | val = I915_READ(reg); |
1205 | WARN((val & SPRITE_ENABLE), | 1205 | WARN(val & SPRITE_ENABLE, |
1206 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1206 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1207 | plane_name(pipe), pipe_name(pipe)); | 1207 | plane_name(pipe), pipe_name(pipe)); |
1208 | } else if (INTEL_INFO(dev)->gen >= 5) { | 1208 | } else if (INTEL_INFO(dev)->gen >= 5) { |
1209 | reg = DVSCNTR(pipe); | 1209 | reg = DVSCNTR(pipe); |
1210 | val = I915_READ(reg); | 1210 | val = I915_READ(reg); |
1211 | WARN((val & DVS_ENABLE), | 1211 | WARN(val & DVS_ENABLE, |
1212 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1212 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1213 | plane_name(pipe), pipe_name(pipe)); | 1213 | plane_name(pipe), pipe_name(pipe)); |
1214 | } | 1214 | } |
@@ -1872,15 +1872,15 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv, | |||
1872 | } | 1872 | } |
1873 | 1873 | ||
1874 | /** | 1874 | /** |
1875 | * intel_enable_primary_plane - enable the primary plane on a given pipe | 1875 | * intel_enable_primary_hw_plane - enable the primary plane on a given pipe |
1876 | * @dev_priv: i915 private structure | 1876 | * @dev_priv: i915 private structure |
1877 | * @plane: plane to enable | 1877 | * @plane: plane to enable |
1878 | * @pipe: pipe being fed | 1878 | * @pipe: pipe being fed |
1879 | * | 1879 | * |
1880 | * Enable @plane on @pipe, making sure that @pipe is running first. | 1880 | * Enable @plane on @pipe, making sure that @pipe is running first. |
1881 | */ | 1881 | */ |
1882 | static void intel_enable_primary_plane(struct drm_i915_private *dev_priv, | 1882 | static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, |
1883 | enum plane plane, enum pipe pipe) | 1883 | enum plane plane, enum pipe pipe) |
1884 | { | 1884 | { |
1885 | struct intel_crtc *intel_crtc = | 1885 | struct intel_crtc *intel_crtc = |
1886 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 1886 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
@@ -1905,15 +1905,15 @@ static void intel_enable_primary_plane(struct drm_i915_private *dev_priv, | |||
1905 | } | 1905 | } |
1906 | 1906 | ||
1907 | /** | 1907 | /** |
1908 | * intel_disable_primary_plane - disable the primary plane | 1908 | * intel_disable_primary_hw_plane - disable the primary hardware plane |
1909 | * @dev_priv: i915 private structure | 1909 | * @dev_priv: i915 private structure |
1910 | * @plane: plane to disable | 1910 | * @plane: plane to disable |
1911 | * @pipe: pipe consuming the data | 1911 | * @pipe: pipe consuming the data |
1912 | * | 1912 | * |
1913 | * Disable @plane; should be an independent operation. | 1913 | * Disable @plane; should be an independent operation. |
1914 | */ | 1914 | */ |
1915 | static void intel_disable_primary_plane(struct drm_i915_private *dev_priv, | 1915 | static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, |
1916 | enum plane plane, enum pipe pipe) | 1916 | enum plane plane, enum pipe pipe) |
1917 | { | 1917 | { |
1918 | struct intel_crtc *intel_crtc = | 1918 | struct intel_crtc *intel_crtc = |
1919 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 1919 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
@@ -2047,8 +2047,114 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, | |||
2047 | } | 2047 | } |
2048 | } | 2048 | } |
2049 | 2049 | ||
2050 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 2050 | int intel_format_to_fourcc(int format) |
2051 | int x, int y) | 2051 | { |
2052 | switch (format) { | ||
2053 | case DISPPLANE_8BPP: | ||
2054 | return DRM_FORMAT_C8; | ||
2055 | case DISPPLANE_BGRX555: | ||
2056 | return DRM_FORMAT_XRGB1555; | ||
2057 | case DISPPLANE_BGRX565: | ||
2058 | return DRM_FORMAT_RGB565; | ||
2059 | default: | ||
2060 | case DISPPLANE_BGRX888: | ||
2061 | return DRM_FORMAT_XRGB8888; | ||
2062 | case DISPPLANE_RGBX888: | ||
2063 | return DRM_FORMAT_XBGR8888; | ||
2064 | case DISPPLANE_BGRX101010: | ||
2065 | return DRM_FORMAT_XRGB2101010; | ||
2066 | case DISPPLANE_RGBX101010: | ||
2067 | return DRM_FORMAT_XBGR2101010; | ||
2068 | } | ||
2069 | } | ||
2070 | |||
2071 | static bool intel_alloc_plane_obj(struct intel_crtc *crtc, | ||
2072 | struct intel_plane_config *plane_config) | ||
2073 | { | ||
2074 | struct drm_device *dev = crtc->base.dev; | ||
2075 | struct drm_i915_gem_object *obj = NULL; | ||
2076 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; | ||
2077 | u32 base = plane_config->base; | ||
2078 | |||
2079 | if (plane_config->size == 0) | ||
2080 | return false; | ||
2081 | |||
2082 | obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, | ||
2083 | plane_config->size); | ||
2084 | if (!obj) | ||
2085 | return false; | ||
2086 | |||
2087 | if (plane_config->tiled) { | ||
2088 | obj->tiling_mode = I915_TILING_X; | ||
2089 | obj->stride = crtc->base.primary->fb->pitches[0]; | ||
2090 | } | ||
2091 | |||
2092 | mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format; | ||
2093 | mode_cmd.width = crtc->base.primary->fb->width; | ||
2094 | mode_cmd.height = crtc->base.primary->fb->height; | ||
2095 | mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0]; | ||
2096 | |||
2097 | mutex_lock(&dev->struct_mutex); | ||
2098 | |||
2099 | if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb), | ||
2100 | &mode_cmd, obj)) { | ||
2101 | DRM_DEBUG_KMS("intel fb init failed\n"); | ||
2102 | goto out_unref_obj; | ||
2103 | } | ||
2104 | |||
2105 | mutex_unlock(&dev->struct_mutex); | ||
2106 | |||
2107 | DRM_DEBUG_KMS("plane fb obj %p\n", obj); | ||
2108 | return true; | ||
2109 | |||
2110 | out_unref_obj: | ||
2111 | drm_gem_object_unreference(&obj->base); | ||
2112 | mutex_unlock(&dev->struct_mutex); | ||
2113 | return false; | ||
2114 | } | ||
2115 | |||
2116 | static void intel_find_plane_obj(struct intel_crtc *intel_crtc, | ||
2117 | struct intel_plane_config *plane_config) | ||
2118 | { | ||
2119 | struct drm_device *dev = intel_crtc->base.dev; | ||
2120 | struct drm_crtc *c; | ||
2121 | struct intel_crtc *i; | ||
2122 | struct intel_framebuffer *fb; | ||
2123 | |||
2124 | if (!intel_crtc->base.primary->fb) | ||
2125 | return; | ||
2126 | |||
2127 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) | ||
2128 | return; | ||
2129 | |||
2130 | kfree(intel_crtc->base.primary->fb); | ||
2131 | intel_crtc->base.primary->fb = NULL; | ||
2132 | |||
2133 | /* | ||
2134 | * Failed to alloc the obj, check to see if we should share | ||
2135 | * an fb with another CRTC instead | ||
2136 | */ | ||
2137 | list_for_each_entry(c, &dev->mode_config.crtc_list, head) { | ||
2138 | i = to_intel_crtc(c); | ||
2139 | |||
2140 | if (c == &intel_crtc->base) | ||
2141 | continue; | ||
2142 | |||
2143 | if (!i->active || !c->primary->fb) | ||
2144 | continue; | ||
2145 | |||
2146 | fb = to_intel_framebuffer(c->primary->fb); | ||
2147 | if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) { | ||
2148 | drm_framebuffer_reference(c->primary->fb); | ||
2149 | intel_crtc->base.primary->fb = c->primary->fb; | ||
2150 | break; | ||
2151 | } | ||
2152 | } | ||
2153 | } | ||
2154 | |||
2155 | static int i9xx_update_primary_plane(struct drm_crtc *crtc, | ||
2156 | struct drm_framebuffer *fb, | ||
2157 | int x, int y) | ||
2052 | { | 2158 | { |
2053 | struct drm_device *dev = crtc->dev; | 2159 | struct drm_device *dev = crtc->dev; |
2054 | struct drm_i915_private *dev_priv = dev->dev_private; | 2160 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2147,8 +2253,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
2147 | return 0; | 2253 | return 0; |
2148 | } | 2254 | } |
2149 | 2255 | ||
2150 | static int ironlake_update_plane(struct drm_crtc *crtc, | 2256 | static int ironlake_update_primary_plane(struct drm_crtc *crtc, |
2151 | struct drm_framebuffer *fb, int x, int y) | 2257 | struct drm_framebuffer *fb, |
2258 | int x, int y) | ||
2152 | { | 2259 | { |
2153 | struct drm_device *dev = crtc->dev; | 2260 | struct drm_device *dev = crtc->dev; |
2154 | struct drm_i915_private *dev_priv = dev->dev_private; | 2261 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2252,7 +2359,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
2252 | dev_priv->display.disable_fbc(dev); | 2359 | dev_priv->display.disable_fbc(dev); |
2253 | intel_increase_pllclock(crtc); | 2360 | intel_increase_pllclock(crtc); |
2254 | 2361 | ||
2255 | return dev_priv->display.update_plane(crtc, fb, x, y); | 2362 | return dev_priv->display.update_primary_plane(crtc, fb, x, y); |
2256 | } | 2363 | } |
2257 | 2364 | ||
2258 | void intel_display_handle_reset(struct drm_device *dev) | 2365 | void intel_display_handle_reset(struct drm_device *dev) |
@@ -2289,11 +2396,13 @@ void intel_display_handle_reset(struct drm_device *dev) | |||
2289 | /* | 2396 | /* |
2290 | * FIXME: Once we have proper support for primary planes (and | 2397 | * FIXME: Once we have proper support for primary planes (and |
2291 | * disabling them without disabling the entire crtc) allow again | 2398 | * disabling them without disabling the entire crtc) allow again |
2292 | * a NULL crtc->fb. | 2399 | * a NULL crtc->primary->fb. |
2293 | */ | 2400 | */ |
2294 | if (intel_crtc->active && crtc->primary->fb) | 2401 | if (intel_crtc->active && crtc->primary->fb) |
2295 | dev_priv->display.update_plane(crtc, crtc->primary->fb, | 2402 | dev_priv->display.update_primary_plane(crtc, |
2296 | crtc->x, crtc->y); | 2403 | crtc->primary->fb, |
2404 | crtc->x, | ||
2405 | crtc->y); | ||
2297 | mutex_unlock(&crtc->mutex); | 2406 | mutex_unlock(&crtc->mutex); |
2298 | } | 2407 | } |
2299 | } | 2408 | } |
@@ -2372,8 +2481,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2372 | ret = intel_pin_and_fence_fb_obj(dev, | 2481 | ret = intel_pin_and_fence_fb_obj(dev, |
2373 | to_intel_framebuffer(fb)->obj, | 2482 | to_intel_framebuffer(fb)->obj, |
2374 | NULL); | 2483 | NULL); |
2484 | mutex_unlock(&dev->struct_mutex); | ||
2375 | if (ret != 0) { | 2485 | if (ret != 0) { |
2376 | mutex_unlock(&dev->struct_mutex); | ||
2377 | DRM_ERROR("pin & fence failed\n"); | 2486 | DRM_ERROR("pin & fence failed\n"); |
2378 | return ret; | 2487 | return ret; |
2379 | } | 2488 | } |
@@ -2409,8 +2518,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2409 | intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; | 2518 | intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; |
2410 | } | 2519 | } |
2411 | 2520 | ||
2412 | ret = dev_priv->display.update_plane(crtc, fb, x, y); | 2521 | ret = dev_priv->display.update_primary_plane(crtc, fb, x, y); |
2413 | if (ret) { | 2522 | if (ret) { |
2523 | mutex_lock(&dev->struct_mutex); | ||
2414 | intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); | 2524 | intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); |
2415 | mutex_unlock(&dev->struct_mutex); | 2525 | mutex_unlock(&dev->struct_mutex); |
2416 | DRM_ERROR("failed to update base address\n"); | 2526 | DRM_ERROR("failed to update base address\n"); |
@@ -2425,9 +2535,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2425 | if (old_fb) { | 2535 | if (old_fb) { |
2426 | if (intel_crtc->active && old_fb != fb) | 2536 | if (intel_crtc->active && old_fb != fb) |
2427 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2537 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2538 | mutex_lock(&dev->struct_mutex); | ||
2428 | intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); | 2539 | intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); |
2540 | mutex_unlock(&dev->struct_mutex); | ||
2429 | } | 2541 | } |
2430 | 2542 | ||
2543 | mutex_lock(&dev->struct_mutex); | ||
2431 | intel_update_fbc(dev); | 2544 | intel_update_fbc(dev); |
2432 | intel_edp_psr_update(dev); | 2545 | intel_edp_psr_update(dev); |
2433 | mutex_unlock(&dev->struct_mutex); | 2546 | mutex_unlock(&dev->struct_mutex); |
@@ -3592,7 +3705,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
3592 | 3705 | ||
3593 | intel_update_watermarks(crtc); | 3706 | intel_update_watermarks(crtc); |
3594 | intel_enable_pipe(intel_crtc); | 3707 | intel_enable_pipe(intel_crtc); |
3595 | intel_enable_primary_plane(dev_priv, plane, pipe); | 3708 | intel_enable_primary_hw_plane(dev_priv, plane, pipe); |
3596 | intel_enable_planes(crtc); | 3709 | intel_enable_planes(crtc); |
3597 | intel_crtc_update_cursor(crtc, true); | 3710 | intel_crtc_update_cursor(crtc, true); |
3598 | 3711 | ||
@@ -3634,7 +3747,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc) | |||
3634 | int pipe = intel_crtc->pipe; | 3747 | int pipe = intel_crtc->pipe; |
3635 | int plane = intel_crtc->plane; | 3748 | int plane = intel_crtc->plane; |
3636 | 3749 | ||
3637 | intel_enable_primary_plane(dev_priv, plane, pipe); | 3750 | intel_enable_primary_hw_plane(dev_priv, plane, pipe); |
3638 | intel_enable_planes(crtc); | 3751 | intel_enable_planes(crtc); |
3639 | intel_crtc_update_cursor(crtc, true); | 3752 | intel_crtc_update_cursor(crtc, true); |
3640 | 3753 | ||
@@ -3664,7 +3777,7 @@ static void haswell_crtc_disable_planes(struct drm_crtc *crtc) | |||
3664 | 3777 | ||
3665 | intel_crtc_update_cursor(crtc, false); | 3778 | intel_crtc_update_cursor(crtc, false); |
3666 | intel_disable_planes(crtc); | 3779 | intel_disable_planes(crtc); |
3667 | intel_disable_primary_plane(dev_priv, plane, pipe); | 3780 | intel_disable_primary_hw_plane(dev_priv, plane, pipe); |
3668 | } | 3781 | } |
3669 | 3782 | ||
3670 | /* | 3783 | /* |
@@ -3792,7 +3905,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3792 | 3905 | ||
3793 | intel_crtc_update_cursor(crtc, false); | 3906 | intel_crtc_update_cursor(crtc, false); |
3794 | intel_disable_planes(crtc); | 3907 | intel_disable_planes(crtc); |
3795 | intel_disable_primary_plane(dev_priv, plane, pipe); | 3908 | intel_disable_primary_hw_plane(dev_priv, plane, pipe); |
3796 | 3909 | ||
3797 | if (intel_crtc->config.has_pch_encoder) | 3910 | if (intel_crtc->config.has_pch_encoder) |
3798 | intel_set_pch_fifo_underrun_reporting(dev, pipe, false); | 3911 | intel_set_pch_fifo_underrun_reporting(dev, pipe, false); |
@@ -4275,7 +4388,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
4275 | intel_update_watermarks(crtc); | 4388 | intel_update_watermarks(crtc); |
4276 | intel_enable_pipe(intel_crtc); | 4389 | intel_enable_pipe(intel_crtc); |
4277 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); | 4390 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); |
4278 | intel_enable_primary_plane(dev_priv, plane, pipe); | 4391 | intel_enable_primary_hw_plane(dev_priv, plane, pipe); |
4279 | intel_enable_planes(crtc); | 4392 | intel_enable_planes(crtc); |
4280 | intel_crtc_update_cursor(crtc, true); | 4393 | intel_crtc_update_cursor(crtc, true); |
4281 | 4394 | ||
@@ -4314,7 +4427,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
4314 | intel_update_watermarks(crtc); | 4427 | intel_update_watermarks(crtc); |
4315 | intel_enable_pipe(intel_crtc); | 4428 | intel_enable_pipe(intel_crtc); |
4316 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); | 4429 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); |
4317 | intel_enable_primary_plane(dev_priv, plane, pipe); | 4430 | intel_enable_primary_hw_plane(dev_priv, plane, pipe); |
4318 | intel_enable_planes(crtc); | 4431 | intel_enable_planes(crtc); |
4319 | /* The fixup needs to happen before cursor is enabled */ | 4432 | /* The fixup needs to happen before cursor is enabled */ |
4320 | if (IS_G4X(dev)) | 4433 | if (IS_G4X(dev)) |
@@ -4370,7 +4483,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
4370 | intel_crtc_dpms_overlay(intel_crtc, false); | 4483 | intel_crtc_dpms_overlay(intel_crtc, false); |
4371 | intel_crtc_update_cursor(crtc, false); | 4484 | intel_crtc_update_cursor(crtc, false); |
4372 | intel_disable_planes(crtc); | 4485 | intel_disable_planes(crtc); |
4373 | intel_disable_primary_plane(dev_priv, plane, pipe); | 4486 | intel_disable_primary_hw_plane(dev_priv, plane, pipe); |
4374 | 4487 | ||
4375 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); | 4488 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); |
4376 | intel_disable_pipe(dev_priv, pipe); | 4489 | intel_disable_pipe(dev_priv, pipe); |
@@ -5611,6 +5724,67 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, | |||
5611 | pipe_config->port_clock = clock.dot / 5; | 5724 | pipe_config->port_clock = clock.dot / 5; |
5612 | } | 5725 | } |
5613 | 5726 | ||
5727 | static void i9xx_get_plane_config(struct intel_crtc *crtc, | ||
5728 | struct intel_plane_config *plane_config) | ||
5729 | { | ||
5730 | struct drm_device *dev = crtc->base.dev; | ||
5731 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5732 | u32 val, base, offset; | ||
5733 | int pipe = crtc->pipe, plane = crtc->plane; | ||
5734 | int fourcc, pixel_format; | ||
5735 | int aligned_height; | ||
5736 | |||
5737 | crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); | ||
5738 | if (!crtc->base.primary->fb) { | ||
5739 | DRM_DEBUG_KMS("failed to alloc fb\n"); | ||
5740 | return; | ||
5741 | } | ||
5742 | |||
5743 | val = I915_READ(DSPCNTR(plane)); | ||
5744 | |||
5745 | if (INTEL_INFO(dev)->gen >= 4) | ||
5746 | if (val & DISPPLANE_TILED) | ||
5747 | plane_config->tiled = true; | ||
5748 | |||
5749 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; | ||
5750 | fourcc = intel_format_to_fourcc(pixel_format); | ||
5751 | crtc->base.primary->fb->pixel_format = fourcc; | ||
5752 | crtc->base.primary->fb->bits_per_pixel = | ||
5753 | drm_format_plane_cpp(fourcc, 0) * 8; | ||
5754 | |||
5755 | if (INTEL_INFO(dev)->gen >= 4) { | ||
5756 | if (plane_config->tiled) | ||
5757 | offset = I915_READ(DSPTILEOFF(plane)); | ||
5758 | else | ||
5759 | offset = I915_READ(DSPLINOFF(plane)); | ||
5760 | base = I915_READ(DSPSURF(plane)) & 0xfffff000; | ||
5761 | } else { | ||
5762 | base = I915_READ(DSPADDR(plane)); | ||
5763 | } | ||
5764 | plane_config->base = base; | ||
5765 | |||
5766 | val = I915_READ(PIPESRC(pipe)); | ||
5767 | crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; | ||
5768 | crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; | ||
5769 | |||
5770 | val = I915_READ(DSPSTRIDE(pipe)); | ||
5771 | crtc->base.primary->fb->pitches[0] = val & 0xffffff80; | ||
5772 | |||
5773 | aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, | ||
5774 | plane_config->tiled); | ||
5775 | |||
5776 | plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * | ||
5777 | aligned_height, PAGE_SIZE); | ||
5778 | |||
5779 | DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", | ||
5780 | pipe, plane, crtc->base.primary->fb->width, | ||
5781 | crtc->base.primary->fb->height, | ||
5782 | crtc->base.primary->fb->bits_per_pixel, base, | ||
5783 | crtc->base.primary->fb->pitches[0], | ||
5784 | plane_config->size); | ||
5785 | |||
5786 | } | ||
5787 | |||
5614 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | 5788 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, |
5615 | struct intel_crtc_config *pipe_config) | 5789 | struct intel_crtc_config *pipe_config) |
5616 | { | 5790 | { |
@@ -6558,6 +6732,66 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, | |||
6558 | } | 6732 | } |
6559 | } | 6733 | } |
6560 | 6734 | ||
6735 | static void ironlake_get_plane_config(struct intel_crtc *crtc, | ||
6736 | struct intel_plane_config *plane_config) | ||
6737 | { | ||
6738 | struct drm_device *dev = crtc->base.dev; | ||
6739 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6740 | u32 val, base, offset; | ||
6741 | int pipe = crtc->pipe, plane = crtc->plane; | ||
6742 | int fourcc, pixel_format; | ||
6743 | int aligned_height; | ||
6744 | |||
6745 | crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); | ||
6746 | if (!crtc->base.primary->fb) { | ||
6747 | DRM_DEBUG_KMS("failed to alloc fb\n"); | ||
6748 | return; | ||
6749 | } | ||
6750 | |||
6751 | val = I915_READ(DSPCNTR(plane)); | ||
6752 | |||
6753 | if (INTEL_INFO(dev)->gen >= 4) | ||
6754 | if (val & DISPPLANE_TILED) | ||
6755 | plane_config->tiled = true; | ||
6756 | |||
6757 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; | ||
6758 | fourcc = intel_format_to_fourcc(pixel_format); | ||
6759 | crtc->base.primary->fb->pixel_format = fourcc; | ||
6760 | crtc->base.primary->fb->bits_per_pixel = | ||
6761 | drm_format_plane_cpp(fourcc, 0) * 8; | ||
6762 | |||
6763 | base = I915_READ(DSPSURF(plane)) & 0xfffff000; | ||
6764 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | ||
6765 | offset = I915_READ(DSPOFFSET(plane)); | ||
6766 | } else { | ||
6767 | if (plane_config->tiled) | ||
6768 | offset = I915_READ(DSPTILEOFF(plane)); | ||
6769 | else | ||
6770 | offset = I915_READ(DSPLINOFF(plane)); | ||
6771 | } | ||
6772 | plane_config->base = base; | ||
6773 | |||
6774 | val = I915_READ(PIPESRC(pipe)); | ||
6775 | crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; | ||
6776 | crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; | ||
6777 | |||
6778 | val = I915_READ(DSPSTRIDE(pipe)); | ||
6779 | crtc->base.primary->fb->pitches[0] = val & 0xffffff80; | ||
6780 | |||
6781 | aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, | ||
6782 | plane_config->tiled); | ||
6783 | |||
6784 | plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * | ||
6785 | aligned_height, PAGE_SIZE); | ||
6786 | |||
6787 | DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", | ||
6788 | pipe, plane, crtc->base.primary->fb->width, | ||
6789 | crtc->base.primary->fb->height, | ||
6790 | crtc->base.primary->fb->bits_per_pixel, base, | ||
6791 | crtc->base.primary->fb->pitches[0], | ||
6792 | plane_config->size); | ||
6793 | } | ||
6794 | |||
6561 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, | 6795 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, |
6562 | struct intel_crtc_config *pipe_config) | 6796 | struct intel_crtc_config *pipe_config) |
6563 | { | 6797 | { |
@@ -6732,6 +6966,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, | |||
6732 | static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | 6966 | static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) |
6733 | { | 6967 | { |
6734 | uint32_t val; | 6968 | uint32_t val; |
6969 | unsigned long irqflags; | ||
6735 | 6970 | ||
6736 | val = I915_READ(LCPLL_CTL); | 6971 | val = I915_READ(LCPLL_CTL); |
6737 | 6972 | ||
@@ -6739,9 +6974,22 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
6739 | LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) | 6974 | LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) |
6740 | return; | 6975 | return; |
6741 | 6976 | ||
6742 | /* Make sure we're not on PC8 state before disabling PC8, otherwise | 6977 | /* |
6743 | * we'll hang the machine! */ | 6978 | * Make sure we're not on PC8 state before disabling PC8, otherwise |
6744 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | 6979 | * we'll hang the machine. To prevent PC8 state, just enable force_wake. |
6980 | * | ||
6981 | * The other problem is that hsw_restore_lcpll() is called as part of | ||
6982 | * the runtime PM resume sequence, so we can't just call | ||
6983 | * gen6_gt_force_wake_get() because that function calls | ||
6984 | * intel_runtime_pm_get(), and we can't change the runtime PM refcount | ||
6985 | * while we are on the resume sequence. So to solve this problem we have | ||
6986 | * to call special forcewake code that doesn't touch runtime PM and | ||
6987 | * doesn't enable the forcewake delayed work. | ||
6988 | */ | ||
6989 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
6990 | if (dev_priv->uncore.forcewake_count++ == 0) | ||
6991 | dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
6992 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
6745 | 6993 | ||
6746 | if (val & LCPLL_POWER_DOWN_ALLOW) { | 6994 | if (val & LCPLL_POWER_DOWN_ALLOW) { |
6747 | val &= ~LCPLL_POWER_DOWN_ALLOW; | 6995 | val &= ~LCPLL_POWER_DOWN_ALLOW; |
@@ -6775,26 +7023,45 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
6775 | DRM_ERROR("Switching back to LCPLL failed\n"); | 7023 | DRM_ERROR("Switching back to LCPLL failed\n"); |
6776 | } | 7024 | } |
6777 | 7025 | ||
6778 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); | 7026 | /* See the big comment above. */ |
7027 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
7028 | if (--dev_priv->uncore.forcewake_count == 0) | ||
7029 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); | ||
7030 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
6779 | } | 7031 | } |
6780 | 7032 | ||
6781 | void hsw_enable_pc8_work(struct work_struct *__work) | 7033 | /* |
7034 | * Package states C8 and deeper are really deep PC states that can only be | ||
7035 | * reached when all the devices on the system allow it, so even if the graphics | ||
7036 | * device allows PC8+, it doesn't mean the system will actually get to these | ||
7037 | * states. Our driver only allows PC8+ when going into runtime PM. | ||
7038 | * | ||
7039 | * The requirements for PC8+ are that all the outputs are disabled, the power | ||
7040 | * well is disabled and most interrupts are disabled, and these are also | ||
7041 | * requirements for runtime PM. When these conditions are met, we manually do | ||
7042 | * the other conditions: disable the interrupts, clocks and switch LCPLL refclk | ||
7043 | * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard | ||
7044 | * hang the machine. | ||
7045 | * | ||
7046 | * When we really reach PC8 or deeper states (not just when we allow it) we lose | ||
7047 | * the state of some registers, so when we come back from PC8+ we need to | ||
7048 | * restore this state. We don't get into PC8+ if we're not in RC6, so we don't | ||
7049 | * need to take care of the registers kept by RC6. Notice that this happens even | ||
7050 | * if we don't put the device in PCI D3 state (which is what currently happens | ||
7051 | * because of the runtime PM support). | ||
7052 | * | ||
7053 | * For more, read "Display Sequences for Package C8" on the hardware | ||
7054 | * documentation. | ||
7055 | */ | ||
7056 | void hsw_enable_pc8(struct drm_i915_private *dev_priv) | ||
6782 | { | 7057 | { |
6783 | struct drm_i915_private *dev_priv = | ||
6784 | container_of(to_delayed_work(__work), struct drm_i915_private, | ||
6785 | pc8.enable_work); | ||
6786 | struct drm_device *dev = dev_priv->dev; | 7058 | struct drm_device *dev = dev_priv->dev; |
6787 | uint32_t val; | 7059 | uint32_t val; |
6788 | 7060 | ||
6789 | WARN_ON(!HAS_PC8(dev)); | 7061 | WARN_ON(!HAS_PC8(dev)); |
6790 | 7062 | ||
6791 | if (dev_priv->pc8.enabled) | ||
6792 | return; | ||
6793 | |||
6794 | DRM_DEBUG_KMS("Enabling package C8+\n"); | 7063 | DRM_DEBUG_KMS("Enabling package C8+\n"); |
6795 | 7064 | ||
6796 | dev_priv->pc8.enabled = true; | ||
6797 | |||
6798 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 7065 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
6799 | val = I915_READ(SOUTH_DSPCLK_GATE_D); | 7066 | val = I915_READ(SOUTH_DSPCLK_GATE_D); |
6800 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; | 7067 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; |
@@ -6802,51 +7069,21 @@ void hsw_enable_pc8_work(struct work_struct *__work) | |||
6802 | } | 7069 | } |
6803 | 7070 | ||
6804 | lpt_disable_clkout_dp(dev); | 7071 | lpt_disable_clkout_dp(dev); |
6805 | hsw_pc8_disable_interrupts(dev); | 7072 | hsw_runtime_pm_disable_interrupts(dev); |
6806 | hsw_disable_lcpll(dev_priv, true, true); | 7073 | hsw_disable_lcpll(dev_priv, true, true); |
6807 | |||
6808 | intel_runtime_pm_put(dev_priv); | ||
6809 | } | 7074 | } |
6810 | 7075 | ||
6811 | static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) | 7076 | void hsw_disable_pc8(struct drm_i915_private *dev_priv) |
6812 | { | ||
6813 | WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); | ||
6814 | WARN(dev_priv->pc8.disable_count < 1, | ||
6815 | "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); | ||
6816 | |||
6817 | dev_priv->pc8.disable_count--; | ||
6818 | if (dev_priv->pc8.disable_count != 0) | ||
6819 | return; | ||
6820 | |||
6821 | schedule_delayed_work(&dev_priv->pc8.enable_work, | ||
6822 | msecs_to_jiffies(i915.pc8_timeout)); | ||
6823 | } | ||
6824 | |||
6825 | static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) | ||
6826 | { | 7077 | { |
6827 | struct drm_device *dev = dev_priv->dev; | 7078 | struct drm_device *dev = dev_priv->dev; |
6828 | uint32_t val; | 7079 | uint32_t val; |
6829 | 7080 | ||
6830 | WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); | ||
6831 | WARN(dev_priv->pc8.disable_count < 0, | ||
6832 | "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); | ||
6833 | |||
6834 | dev_priv->pc8.disable_count++; | ||
6835 | if (dev_priv->pc8.disable_count != 1) | ||
6836 | return; | ||
6837 | |||
6838 | WARN_ON(!HAS_PC8(dev)); | 7081 | WARN_ON(!HAS_PC8(dev)); |
6839 | 7082 | ||
6840 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); | ||
6841 | if (!dev_priv->pc8.enabled) | ||
6842 | return; | ||
6843 | |||
6844 | DRM_DEBUG_KMS("Disabling package C8+\n"); | 7083 | DRM_DEBUG_KMS("Disabling package C8+\n"); |
6845 | 7084 | ||
6846 | intel_runtime_pm_get(dev_priv); | ||
6847 | |||
6848 | hsw_restore_lcpll(dev_priv); | 7085 | hsw_restore_lcpll(dev_priv); |
6849 | hsw_pc8_restore_interrupts(dev); | 7086 | hsw_runtime_pm_restore_interrupts(dev); |
6850 | lpt_init_pch_refclk(dev); | 7087 | lpt_init_pch_refclk(dev); |
6851 | 7088 | ||
6852 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 7089 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
@@ -6860,89 +7097,11 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) | |||
6860 | mutex_lock(&dev_priv->rps.hw_lock); | 7097 | mutex_lock(&dev_priv->rps.hw_lock); |
6861 | gen6_update_ring_freq(dev); | 7098 | gen6_update_ring_freq(dev); |
6862 | mutex_unlock(&dev_priv->rps.hw_lock); | 7099 | mutex_unlock(&dev_priv->rps.hw_lock); |
6863 | dev_priv->pc8.enabled = false; | ||
6864 | } | ||
6865 | |||
6866 | void hsw_enable_package_c8(struct drm_i915_private *dev_priv) | ||
6867 | { | ||
6868 | if (!HAS_PC8(dev_priv->dev)) | ||
6869 | return; | ||
6870 | |||
6871 | mutex_lock(&dev_priv->pc8.lock); | ||
6872 | __hsw_enable_package_c8(dev_priv); | ||
6873 | mutex_unlock(&dev_priv->pc8.lock); | ||
6874 | } | ||
6875 | |||
6876 | void hsw_disable_package_c8(struct drm_i915_private *dev_priv) | ||
6877 | { | ||
6878 | if (!HAS_PC8(dev_priv->dev)) | ||
6879 | return; | ||
6880 | |||
6881 | mutex_lock(&dev_priv->pc8.lock); | ||
6882 | __hsw_disable_package_c8(dev_priv); | ||
6883 | mutex_unlock(&dev_priv->pc8.lock); | ||
6884 | } | ||
6885 | |||
6886 | static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv) | ||
6887 | { | ||
6888 | struct drm_device *dev = dev_priv->dev; | ||
6889 | struct intel_crtc *crtc; | ||
6890 | uint32_t val; | ||
6891 | |||
6892 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) | ||
6893 | if (crtc->base.enabled) | ||
6894 | return false; | ||
6895 | |||
6896 | /* This case is still possible since we have the i915.disable_power_well | ||
6897 | * parameter and also the KVMr or something else might be requesting the | ||
6898 | * power well. */ | ||
6899 | val = I915_READ(HSW_PWR_WELL_DRIVER); | ||
6900 | if (val != 0) { | ||
6901 | DRM_DEBUG_KMS("Not enabling PC8: power well on\n"); | ||
6902 | return false; | ||
6903 | } | ||
6904 | |||
6905 | return true; | ||
6906 | } | ||
6907 | |||
6908 | /* Since we're called from modeset_global_resources there's no way to | ||
6909 | * symmetrically increase and decrease the refcount, so we use | ||
6910 | * dev_priv->pc8.requirements_met to track whether we already have the refcount | ||
6911 | * or not. | ||
6912 | */ | ||
6913 | static void hsw_update_package_c8(struct drm_device *dev) | ||
6914 | { | ||
6915 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6916 | bool allow; | ||
6917 | |||
6918 | if (!HAS_PC8(dev_priv->dev)) | ||
6919 | return; | ||
6920 | |||
6921 | if (!i915.enable_pc8) | ||
6922 | return; | ||
6923 | |||
6924 | mutex_lock(&dev_priv->pc8.lock); | ||
6925 | |||
6926 | allow = hsw_can_enable_package_c8(dev_priv); | ||
6927 | |||
6928 | if (allow == dev_priv->pc8.requirements_met) | ||
6929 | goto done; | ||
6930 | |||
6931 | dev_priv->pc8.requirements_met = allow; | ||
6932 | |||
6933 | if (allow) | ||
6934 | __hsw_enable_package_c8(dev_priv); | ||
6935 | else | ||
6936 | __hsw_disable_package_c8(dev_priv); | ||
6937 | |||
6938 | done: | ||
6939 | mutex_unlock(&dev_priv->pc8.lock); | ||
6940 | } | 7100 | } |
6941 | 7101 | ||
6942 | static void haswell_modeset_global_resources(struct drm_device *dev) | 7102 | static void haswell_modeset_global_resources(struct drm_device *dev) |
6943 | { | 7103 | { |
6944 | modeset_update_crtc_power_domains(dev); | 7104 | modeset_update_crtc_power_domains(dev); |
6945 | hsw_update_package_c8(dev); | ||
6946 | } | 7105 | } |
6947 | 7106 | ||
6948 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, | 7107 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, |
@@ -7446,10 +7605,26 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
7446 | bool visible = base != 0; | 7605 | bool visible = base != 0; |
7447 | 7606 | ||
7448 | if (intel_crtc->cursor_visible != visible) { | 7607 | if (intel_crtc->cursor_visible != visible) { |
7608 | int16_t width = intel_crtc->cursor_width; | ||
7449 | uint32_t cntl = I915_READ(CURCNTR(pipe)); | 7609 | uint32_t cntl = I915_READ(CURCNTR(pipe)); |
7450 | if (base) { | 7610 | if (base) { |
7451 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | 7611 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); |
7452 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | 7612 | cntl |= MCURSOR_GAMMA_ENABLE; |
7613 | |||
7614 | switch (width) { | ||
7615 | case 64: | ||
7616 | cntl |= CURSOR_MODE_64_ARGB_AX; | ||
7617 | break; | ||
7618 | case 128: | ||
7619 | cntl |= CURSOR_MODE_128_ARGB_AX; | ||
7620 | break; | ||
7621 | case 256: | ||
7622 | cntl |= CURSOR_MODE_256_ARGB_AX; | ||
7623 | break; | ||
7624 | default: | ||
7625 | WARN_ON(1); | ||
7626 | return; | ||
7627 | } | ||
7453 | cntl |= pipe << 28; /* Connect to correct pipe */ | 7628 | cntl |= pipe << 28; /* Connect to correct pipe */ |
7454 | } else { | 7629 | } else { |
7455 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | 7630 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
@@ -7474,10 +7649,25 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) | |||
7474 | bool visible = base != 0; | 7649 | bool visible = base != 0; |
7475 | 7650 | ||
7476 | if (intel_crtc->cursor_visible != visible) { | 7651 | if (intel_crtc->cursor_visible != visible) { |
7652 | int16_t width = intel_crtc->cursor_width; | ||
7477 | uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); | 7653 | uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); |
7478 | if (base) { | 7654 | if (base) { |
7479 | cntl &= ~CURSOR_MODE; | 7655 | cntl &= ~CURSOR_MODE; |
7480 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | 7656 | cntl |= MCURSOR_GAMMA_ENABLE; |
7657 | switch (width) { | ||
7658 | case 64: | ||
7659 | cntl |= CURSOR_MODE_64_ARGB_AX; | ||
7660 | break; | ||
7661 | case 128: | ||
7662 | cntl |= CURSOR_MODE_128_ARGB_AX; | ||
7663 | break; | ||
7664 | case 256: | ||
7665 | cntl |= CURSOR_MODE_256_ARGB_AX; | ||
7666 | break; | ||
7667 | default: | ||
7668 | WARN_ON(1); | ||
7669 | return; | ||
7670 | } | ||
7481 | } else { | 7671 | } else { |
7482 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | 7672 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
7483 | cntl |= CURSOR_MODE_DISABLE; | 7673 | cntl |= CURSOR_MODE_DISABLE; |
@@ -7573,9 +7763,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
7573 | goto finish; | 7763 | goto finish; |
7574 | } | 7764 | } |
7575 | 7765 | ||
7576 | /* Currently we only support 64x64 cursors */ | 7766 | /* Check for which cursor types we support */ |
7577 | if (width != 64 || height != 64) { | 7767 | if (!((width == 64 && height == 64) || |
7578 | DRM_ERROR("we currently only support 64x64 cursors\n"); | 7768 | (width == 128 && height == 128 && !IS_GEN2(dev)) || |
7769 | (width == 256 && height == 256 && !IS_GEN2(dev)))) { | ||
7770 | DRM_DEBUG("Cursor dimension not supported\n"); | ||
7579 | return -EINVAL; | 7771 | return -EINVAL; |
7580 | } | 7772 | } |
7581 | 7773 | ||
@@ -8230,7 +8422,7 @@ void intel_mark_busy(struct drm_device *dev) | |||
8230 | if (dev_priv->mm.busy) | 8422 | if (dev_priv->mm.busy) |
8231 | return; | 8423 | return; |
8232 | 8424 | ||
8233 | hsw_disable_package_c8(dev_priv); | 8425 | intel_runtime_pm_get(dev_priv); |
8234 | i915_update_gfx_val(dev_priv); | 8426 | i915_update_gfx_val(dev_priv); |
8235 | dev_priv->mm.busy = true; | 8427 | dev_priv->mm.busy = true; |
8236 | } | 8428 | } |
@@ -8259,7 +8451,7 @@ void intel_mark_idle(struct drm_device *dev) | |||
8259 | gen6_rps_idle(dev->dev_private); | 8451 | gen6_rps_idle(dev->dev_private); |
8260 | 8452 | ||
8261 | out: | 8453 | out: |
8262 | hsw_enable_package_c8(dev_priv); | 8454 | intel_runtime_pm_put(dev_priv); |
8263 | } | 8455 | } |
8264 | 8456 | ||
8265 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj, | 8457 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj, |
@@ -9015,23 +9207,47 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
9015 | DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); | 9207 | DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); |
9016 | } | 9208 | } |
9017 | 9209 | ||
9018 | static bool check_encoder_cloning(struct drm_crtc *crtc) | 9210 | static bool encoders_cloneable(const struct intel_encoder *a, |
9211 | const struct intel_encoder *b) | ||
9019 | { | 9212 | { |
9020 | int num_encoders = 0; | 9213 | /* masks could be asymmetric, so check both ways */ |
9021 | bool uncloneable_encoders = false; | 9214 | return a == b || (a->cloneable & (1 << b->type) && |
9215 | b->cloneable & (1 << a->type)); | ||
9216 | } | ||
9217 | |||
9218 | static bool check_single_encoder_cloning(struct intel_crtc *crtc, | ||
9219 | struct intel_encoder *encoder) | ||
9220 | { | ||
9221 | struct drm_device *dev = crtc->base.dev; | ||
9222 | struct intel_encoder *source_encoder; | ||
9223 | |||
9224 | list_for_each_entry(source_encoder, | ||
9225 | &dev->mode_config.encoder_list, base.head) { | ||
9226 | if (source_encoder->new_crtc != crtc) | ||
9227 | continue; | ||
9228 | |||
9229 | if (!encoders_cloneable(encoder, source_encoder)) | ||
9230 | return false; | ||
9231 | } | ||
9232 | |||
9233 | return true; | ||
9234 | } | ||
9235 | |||
9236 | static bool check_encoder_cloning(struct intel_crtc *crtc) | ||
9237 | { | ||
9238 | struct drm_device *dev = crtc->base.dev; | ||
9022 | struct intel_encoder *encoder; | 9239 | struct intel_encoder *encoder; |
9023 | 9240 | ||
9024 | list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, | 9241 | list_for_each_entry(encoder, |
9025 | base.head) { | 9242 | &dev->mode_config.encoder_list, base.head) { |
9026 | if (&encoder->new_crtc->base != crtc) | 9243 | if (encoder->new_crtc != crtc) |
9027 | continue; | 9244 | continue; |
9028 | 9245 | ||
9029 | num_encoders++; | 9246 | if (!check_single_encoder_cloning(crtc, encoder)) |
9030 | if (!encoder->cloneable) | 9247 | return false; |
9031 | uncloneable_encoders = true; | ||
9032 | } | 9248 | } |
9033 | 9249 | ||
9034 | return !(num_encoders > 1 && uncloneable_encoders); | 9250 | return true; |
9035 | } | 9251 | } |
9036 | 9252 | ||
9037 | static struct intel_crtc_config * | 9253 | static struct intel_crtc_config * |
@@ -9045,7 +9261,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
9045 | int plane_bpp, ret = -EINVAL; | 9261 | int plane_bpp, ret = -EINVAL; |
9046 | bool retry = true; | 9262 | bool retry = true; |
9047 | 9263 | ||
9048 | if (!check_encoder_cloning(crtc)) { | 9264 | if (!check_encoder_cloning(to_intel_crtc(crtc))) { |
9049 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); | 9265 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); |
9050 | return ERR_PTR(-EINVAL); | 9266 | return ERR_PTR(-EINVAL); |
9051 | } | 9267 | } |
@@ -10337,6 +10553,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
10337 | 10553 | ||
10338 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | 10554 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); |
10339 | 10555 | ||
10556 | if (IS_GEN2(dev)) { | ||
10557 | intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH; | ||
10558 | intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT; | ||
10559 | } else { | ||
10560 | intel_crtc->max_cursor_width = CURSOR_WIDTH; | ||
10561 | intel_crtc->max_cursor_height = CURSOR_HEIGHT; | ||
10562 | } | ||
10563 | dev->mode_config.cursor_width = intel_crtc->max_cursor_width; | ||
10564 | dev->mode_config.cursor_height = intel_crtc->max_cursor_height; | ||
10565 | |||
10340 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | 10566 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
10341 | for (i = 0; i < 256; i++) { | 10567 | for (i = 0; i < 256; i++) { |
10342 | intel_crtc->lut_r[i] = i; | 10568 | intel_crtc->lut_r[i] = i; |
@@ -10408,12 +10634,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) | |||
10408 | 10634 | ||
10409 | list_for_each_entry(source_encoder, | 10635 | list_for_each_entry(source_encoder, |
10410 | &dev->mode_config.encoder_list, base.head) { | 10636 | &dev->mode_config.encoder_list, base.head) { |
10411 | 10637 | if (encoders_cloneable(encoder, source_encoder)) | |
10412 | if (encoder == source_encoder) | ||
10413 | index_mask |= (1 << entry); | ||
10414 | |||
10415 | /* Intel hw has only one MUX where enocoders could be cloned. */ | ||
10416 | if (encoder->cloneable && source_encoder->cloneable) | ||
10417 | index_mask |= (1 << entry); | 10638 | index_mask |= (1 << entry); |
10418 | 10639 | ||
10419 | entry++; | 10640 | entry++; |
@@ -10770,32 +10991,40 @@ static void intel_init_display(struct drm_device *dev) | |||
10770 | 10991 | ||
10771 | if (HAS_DDI(dev)) { | 10992 | if (HAS_DDI(dev)) { |
10772 | dev_priv->display.get_pipe_config = haswell_get_pipe_config; | 10993 | dev_priv->display.get_pipe_config = haswell_get_pipe_config; |
10994 | dev_priv->display.get_plane_config = ironlake_get_plane_config; | ||
10773 | dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; | 10995 | dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; |
10774 | dev_priv->display.crtc_enable = haswell_crtc_enable; | 10996 | dev_priv->display.crtc_enable = haswell_crtc_enable; |
10775 | dev_priv->display.crtc_disable = haswell_crtc_disable; | 10997 | dev_priv->display.crtc_disable = haswell_crtc_disable; |
10776 | dev_priv->display.off = haswell_crtc_off; | 10998 | dev_priv->display.off = haswell_crtc_off; |
10777 | dev_priv->display.update_plane = ironlake_update_plane; | 10999 | dev_priv->display.update_primary_plane = |
11000 | ironlake_update_primary_plane; | ||
10778 | } else if (HAS_PCH_SPLIT(dev)) { | 11001 | } else if (HAS_PCH_SPLIT(dev)) { |
10779 | dev_priv->display.get_pipe_config = ironlake_get_pipe_config; | 11002 | dev_priv->display.get_pipe_config = ironlake_get_pipe_config; |
11003 | dev_priv->display.get_plane_config = ironlake_get_plane_config; | ||
10780 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; | 11004 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
10781 | dev_priv->display.crtc_enable = ironlake_crtc_enable; | 11005 | dev_priv->display.crtc_enable = ironlake_crtc_enable; |
10782 | dev_priv->display.crtc_disable = ironlake_crtc_disable; | 11006 | dev_priv->display.crtc_disable = ironlake_crtc_disable; |
10783 | dev_priv->display.off = ironlake_crtc_off; | 11007 | dev_priv->display.off = ironlake_crtc_off; |
10784 | dev_priv->display.update_plane = ironlake_update_plane; | 11008 | dev_priv->display.update_primary_plane = |
11009 | ironlake_update_primary_plane; | ||
10785 | } else if (IS_VALLEYVIEW(dev)) { | 11010 | } else if (IS_VALLEYVIEW(dev)) { |
10786 | dev_priv->display.get_pipe_config = i9xx_get_pipe_config; | 11011 | dev_priv->display.get_pipe_config = i9xx_get_pipe_config; |
11012 | dev_priv->display.get_plane_config = i9xx_get_plane_config; | ||
10787 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; | 11013 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
10788 | dev_priv->display.crtc_enable = valleyview_crtc_enable; | 11014 | dev_priv->display.crtc_enable = valleyview_crtc_enable; |
10789 | dev_priv->display.crtc_disable = i9xx_crtc_disable; | 11015 | dev_priv->display.crtc_disable = i9xx_crtc_disable; |
10790 | dev_priv->display.off = i9xx_crtc_off; | 11016 | dev_priv->display.off = i9xx_crtc_off; |
10791 | dev_priv->display.update_plane = i9xx_update_plane; | 11017 | dev_priv->display.update_primary_plane = |
11018 | i9xx_update_primary_plane; | ||
10792 | } else { | 11019 | } else { |
10793 | dev_priv->display.get_pipe_config = i9xx_get_pipe_config; | 11020 | dev_priv->display.get_pipe_config = i9xx_get_pipe_config; |
11021 | dev_priv->display.get_plane_config = i9xx_get_plane_config; | ||
10794 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; | 11022 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
10795 | dev_priv->display.crtc_enable = i9xx_crtc_enable; | 11023 | dev_priv->display.crtc_enable = i9xx_crtc_enable; |
10796 | dev_priv->display.crtc_disable = i9xx_crtc_disable; | 11024 | dev_priv->display.crtc_disable = i9xx_crtc_disable; |
10797 | dev_priv->display.off = i9xx_crtc_off; | 11025 | dev_priv->display.off = i9xx_crtc_off; |
10798 | dev_priv->display.update_plane = i9xx_update_plane; | 11026 | dev_priv->display.update_primary_plane = |
11027 | i9xx_update_primary_plane; | ||
10799 | } | 11028 | } |
10800 | 11029 | ||
10801 | /* Returns the core display clock speed */ | 11030 | /* Returns the core display clock speed */ |
@@ -11053,6 +11282,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
11053 | struct drm_i915_private *dev_priv = dev->dev_private; | 11282 | struct drm_i915_private *dev_priv = dev->dev_private; |
11054 | int sprite, ret; | 11283 | int sprite, ret; |
11055 | enum pipe pipe; | 11284 | enum pipe pipe; |
11285 | struct intel_crtc *crtc; | ||
11056 | 11286 | ||
11057 | drm_mode_config_init(dev); | 11287 | drm_mode_config_init(dev); |
11058 | 11288 | ||
@@ -11115,6 +11345,29 @@ void intel_modeset_init(struct drm_device *dev) | |||
11115 | mutex_lock(&dev->mode_config.mutex); | 11345 | mutex_lock(&dev->mode_config.mutex); |
11116 | intel_modeset_setup_hw_state(dev, false); | 11346 | intel_modeset_setup_hw_state(dev, false); |
11117 | mutex_unlock(&dev->mode_config.mutex); | 11347 | mutex_unlock(&dev->mode_config.mutex); |
11348 | |||
11349 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | ||
11350 | base.head) { | ||
11351 | if (!crtc->active) | ||
11352 | continue; | ||
11353 | |||
11354 | /* | ||
11355 | * Note that reserving the BIOS fb up front prevents us | ||
11356 | * from stuffing other stolen allocations like the ring | ||
11357 | * on top. This prevents some ugliness at boot time, and | ||
11358 | * can even allow for smooth boot transitions if the BIOS | ||
11359 | * fb is large enough for the active pipe configuration. | ||
11360 | */ | ||
11361 | if (dev_priv->display.get_plane_config) { | ||
11362 | dev_priv->display.get_plane_config(crtc, | ||
11363 | &crtc->plane_config); | ||
11364 | /* | ||
11365 | * If the fb is shared between multiple heads, we'll | ||
11366 | * just get the first one. | ||
11367 | */ | ||
11368 | intel_find_plane_obj(crtc, &crtc->plane_config); | ||
11369 | } | ||
11370 | } | ||
11118 | } | 11371 | } |
11119 | 11372 | ||
11120 | static void | 11373 | static void |
@@ -11484,9 +11737,32 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
11484 | 11737 | ||
11485 | void intel_modeset_gem_init(struct drm_device *dev) | 11738 | void intel_modeset_gem_init(struct drm_device *dev) |
11486 | { | 11739 | { |
11740 | struct drm_crtc *c; | ||
11741 | struct intel_framebuffer *fb; | ||
11742 | |||
11487 | intel_modeset_init_hw(dev); | 11743 | intel_modeset_init_hw(dev); |
11488 | 11744 | ||
11489 | intel_setup_overlay(dev); | 11745 | intel_setup_overlay(dev); |
11746 | |||
11747 | /* | ||
11748 | * Make sure any fbs we allocated at startup are properly | ||
11749 | * pinned & fenced. When we do the allocation it's too early | ||
11750 | * for this. | ||
11751 | */ | ||
11752 | mutex_lock(&dev->struct_mutex); | ||
11753 | list_for_each_entry(c, &dev->mode_config.crtc_list, head) { | ||
11754 | if (!c->primary->fb) | ||
11755 | continue; | ||
11756 | |||
11757 | fb = to_intel_framebuffer(c->primary->fb); | ||
11758 | if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) { | ||
11759 | DRM_ERROR("failed to pin boot fb on pipe %d\n", | ||
11760 | to_intel_crtc(c)->pipe); | ||
11761 | drm_framebuffer_unreference(c->primary->fb); | ||
11762 | c->primary->fb = NULL; | ||
11763 | } | ||
11764 | } | ||
11765 | mutex_unlock(&dev->struct_mutex); | ||
11490 | } | 11766 | } |
11491 | 11767 | ||
11492 | void intel_connector_unregister(struct intel_connector *intel_connector) | 11768 | void intel_connector_unregister(struct intel_connector *intel_connector) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f2fedc06976b..5ce5e5be7e88 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -91,7 +91,7 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 93 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
94 | static void edp_panel_vdd_on(struct intel_dp *intel_dp); | 94 | static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); |
95 | static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | 95 | static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); |
96 | 96 | ||
97 | static int | 97 | static int |
@@ -460,6 +460,9 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
460 | uint32_t status; | 460 | uint32_t status; |
461 | int try, clock = 0; | 461 | int try, clock = 0; |
462 | bool has_aux_irq = HAS_AUX_IRQ(dev); | 462 | bool has_aux_irq = HAS_AUX_IRQ(dev); |
463 | bool vdd; | ||
464 | |||
465 | vdd = _edp_panel_vdd_on(intel_dp); | ||
463 | 466 | ||
464 | /* dp aux is extremely sensitive to irq latency, hence request the | 467 | /* dp aux is extremely sensitive to irq latency, hence request the |
465 | * lowest possible wakeup latency and so prevent the cpu from going into | 468 | * lowest possible wakeup latency and so prevent the cpu from going into |
@@ -565,223 +568,130 @@ out: | |||
565 | pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); | 568 | pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); |
566 | intel_aux_display_runtime_put(dev_priv); | 569 | intel_aux_display_runtime_put(dev_priv); |
567 | 570 | ||
571 | if (vdd) | ||
572 | edp_panel_vdd_off(intel_dp, false); | ||
573 | |||
568 | return ret; | 574 | return ret; |
569 | } | 575 | } |
570 | 576 | ||
571 | /* Write data to the aux channel in native mode */ | 577 | #define HEADER_SIZE 4 |
572 | static int | 578 | static ssize_t |
573 | intel_dp_aux_native_write(struct intel_dp *intel_dp, | 579 | intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) |
574 | uint16_t address, uint8_t *send, int send_bytes) | ||
575 | { | 580 | { |
581 | struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); | ||
582 | uint8_t txbuf[20], rxbuf[20]; | ||
583 | size_t txsize, rxsize; | ||
576 | int ret; | 584 | int ret; |
577 | uint8_t msg[20]; | ||
578 | int msg_bytes; | ||
579 | uint8_t ack; | ||
580 | int retry; | ||
581 | 585 | ||
582 | if (WARN_ON(send_bytes > 16)) | 586 | txbuf[0] = msg->request << 4; |
583 | return -E2BIG; | 587 | txbuf[1] = msg->address >> 8; |
588 | txbuf[2] = msg->address & 0xff; | ||
589 | txbuf[3] = msg->size - 1; | ||
584 | 590 | ||
585 | intel_dp_check_edp(intel_dp); | 591 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
586 | msg[0] = DP_AUX_NATIVE_WRITE << 4; | 592 | case DP_AUX_NATIVE_WRITE: |
587 | msg[1] = address >> 8; | 593 | case DP_AUX_I2C_WRITE: |
588 | msg[2] = address & 0xff; | 594 | txsize = HEADER_SIZE + msg->size; |
589 | msg[3] = send_bytes - 1; | 595 | rxsize = 1; |
590 | memcpy(&msg[4], send, send_bytes); | ||
591 | msg_bytes = send_bytes + 4; | ||
592 | for (retry = 0; retry < 7; retry++) { | ||
593 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); | ||
594 | if (ret < 0) | ||
595 | return ret; | ||
596 | ack >>= 4; | ||
597 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | ||
598 | return send_bytes; | ||
599 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) | ||
600 | usleep_range(400, 500); | ||
601 | else | ||
602 | return -EIO; | ||
603 | } | ||
604 | 596 | ||
605 | DRM_ERROR("too many retries, giving up\n"); | 597 | if (WARN_ON(txsize > 20)) |
606 | return -EIO; | 598 | return -E2BIG; |
607 | } | ||
608 | 599 | ||
609 | /* Write a single byte to the aux channel in native mode */ | 600 | memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); |
610 | static int | ||
611 | intel_dp_aux_native_write_1(struct intel_dp *intel_dp, | ||
612 | uint16_t address, uint8_t byte) | ||
613 | { | ||
614 | return intel_dp_aux_native_write(intel_dp, address, &byte, 1); | ||
615 | } | ||
616 | 601 | ||
617 | /* read bytes from a native aux channel */ | 602 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); |
618 | static int | 603 | if (ret > 0) { |
619 | intel_dp_aux_native_read(struct intel_dp *intel_dp, | 604 | msg->reply = rxbuf[0] >> 4; |
620 | uint16_t address, uint8_t *recv, int recv_bytes) | ||
621 | { | ||
622 | uint8_t msg[4]; | ||
623 | int msg_bytes; | ||
624 | uint8_t reply[20]; | ||
625 | int reply_bytes; | ||
626 | uint8_t ack; | ||
627 | int ret; | ||
628 | int retry; | ||
629 | 605 | ||
630 | if (WARN_ON(recv_bytes > 19)) | 606 | /* Return payload size. */ |
631 | return -E2BIG; | 607 | ret = msg->size; |
608 | } | ||
609 | break; | ||
632 | 610 | ||
633 | intel_dp_check_edp(intel_dp); | 611 | case DP_AUX_NATIVE_READ: |
634 | msg[0] = DP_AUX_NATIVE_READ << 4; | 612 | case DP_AUX_I2C_READ: |
635 | msg[1] = address >> 8; | 613 | txsize = HEADER_SIZE; |
636 | msg[2] = address & 0xff; | 614 | rxsize = msg->size + 1; |
637 | msg[3] = recv_bytes - 1; | 615 | |
638 | 616 | if (WARN_ON(rxsize > 20)) | |
639 | msg_bytes = 4; | 617 | return -E2BIG; |
640 | reply_bytes = recv_bytes + 1; | 618 | |
641 | 619 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); | |
642 | for (retry = 0; retry < 7; retry++) { | 620 | if (ret > 0) { |
643 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, | 621 | msg->reply = rxbuf[0] >> 4; |
644 | reply, reply_bytes); | 622 | /* |
645 | if (ret == 0) | 623 | * Assume happy day, and copy the data. The caller is |
646 | return -EPROTO; | 624 | * expected to check msg->reply before touching it. |
647 | if (ret < 0) | 625 | * |
648 | return ret; | 626 | * Return payload size. |
649 | ack = reply[0] >> 4; | 627 | */ |
650 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) { | 628 | ret--; |
651 | memcpy(recv, reply + 1, ret - 1); | 629 | memcpy(msg->buffer, rxbuf + 1, ret); |
652 | return ret - 1; | ||
653 | } | 630 | } |
654 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) | 631 | break; |
655 | usleep_range(400, 500); | 632 | |
656 | else | 633 | default: |
657 | return -EIO; | 634 | ret = -EINVAL; |
635 | break; | ||
658 | } | 636 | } |
659 | 637 | ||
660 | DRM_ERROR("too many retries, giving up\n"); | 638 | return ret; |
661 | return -EIO; | ||
662 | } | 639 | } |
663 | 640 | ||
664 | static int | 641 | static void |
665 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 642 | intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) |
666 | uint8_t write_byte, uint8_t *read_byte) | 643 | { |
667 | { | 644 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
668 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 645 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
669 | struct intel_dp *intel_dp = container_of(adapter, | 646 | enum port port = intel_dig_port->port; |
670 | struct intel_dp, | 647 | const char *name = NULL; |
671 | adapter); | ||
672 | uint16_t address = algo_data->address; | ||
673 | uint8_t msg[5]; | ||
674 | uint8_t reply[2]; | ||
675 | unsigned retry; | ||
676 | int msg_bytes; | ||
677 | int reply_bytes; | ||
678 | int ret; | 648 | int ret; |
679 | 649 | ||
680 | edp_panel_vdd_on(intel_dp); | 650 | switch (port) { |
681 | intel_dp_check_edp(intel_dp); | 651 | case PORT_A: |
682 | /* Set up the command byte */ | 652 | intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; |
683 | if (mode & MODE_I2C_READ) | 653 | name = "DPDDC-A"; |
684 | msg[0] = DP_AUX_I2C_READ << 4; | ||
685 | else | ||
686 | msg[0] = DP_AUX_I2C_WRITE << 4; | ||
687 | |||
688 | if (!(mode & MODE_I2C_STOP)) | ||
689 | msg[0] |= DP_AUX_I2C_MOT << 4; | ||
690 | |||
691 | msg[1] = address >> 8; | ||
692 | msg[2] = address; | ||
693 | |||
694 | switch (mode) { | ||
695 | case MODE_I2C_WRITE: | ||
696 | msg[3] = 0; | ||
697 | msg[4] = write_byte; | ||
698 | msg_bytes = 5; | ||
699 | reply_bytes = 1; | ||
700 | break; | 654 | break; |
701 | case MODE_I2C_READ: | 655 | case PORT_B: |
702 | msg[3] = 0; | 656 | intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; |
703 | msg_bytes = 4; | 657 | name = "DPDDC-B"; |
704 | reply_bytes = 2; | ||
705 | break; | 658 | break; |
706 | default: | 659 | case PORT_C: |
707 | msg_bytes = 3; | 660 | intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; |
708 | reply_bytes = 1; | 661 | name = "DPDDC-C"; |
709 | break; | 662 | break; |
663 | case PORT_D: | ||
664 | intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; | ||
665 | name = "DPDDC-D"; | ||
666 | break; | ||
667 | default: | ||
668 | BUG(); | ||
710 | } | 669 | } |
711 | 670 | ||
712 | /* | 671 | if (!HAS_DDI(dev)) |
713 | * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is | 672 | intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; |
714 | * required to retry at least seven times upon receiving AUX_DEFER | ||
715 | * before giving up the AUX transaction. | ||
716 | */ | ||
717 | for (retry = 0; retry < 7; retry++) { | ||
718 | ret = intel_dp_aux_ch(intel_dp, | ||
719 | msg, msg_bytes, | ||
720 | reply, reply_bytes); | ||
721 | if (ret < 0) { | ||
722 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | ||
723 | goto out; | ||
724 | } | ||
725 | 673 | ||
726 | switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { | 674 | intel_dp->aux.name = name; |
727 | case DP_AUX_NATIVE_REPLY_ACK: | 675 | intel_dp->aux.dev = dev->dev; |
728 | /* I2C-over-AUX Reply field is only valid | 676 | intel_dp->aux.transfer = intel_dp_aux_transfer; |
729 | * when paired with AUX ACK. | ||
730 | */ | ||
731 | break; | ||
732 | case DP_AUX_NATIVE_REPLY_NACK: | ||
733 | DRM_DEBUG_KMS("aux_ch native nack\n"); | ||
734 | ret = -EREMOTEIO; | ||
735 | goto out; | ||
736 | case DP_AUX_NATIVE_REPLY_DEFER: | ||
737 | /* | ||
738 | * For now, just give more slack to branch devices. We | ||
739 | * could check the DPCD for I2C bit rate capabilities, | ||
740 | * and if available, adjust the interval. We could also | ||
741 | * be more careful with DP-to-Legacy adapters where a | ||
742 | * long legacy cable may force very low I2C bit rates. | ||
743 | */ | ||
744 | if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | ||
745 | DP_DWN_STRM_PORT_PRESENT) | ||
746 | usleep_range(500, 600); | ||
747 | else | ||
748 | usleep_range(300, 400); | ||
749 | continue; | ||
750 | default: | ||
751 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", | ||
752 | reply[0]); | ||
753 | ret = -EREMOTEIO; | ||
754 | goto out; | ||
755 | } | ||
756 | 677 | ||
757 | switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { | 678 | DRM_DEBUG_KMS("registering %s bus for %s\n", name, |
758 | case DP_AUX_I2C_REPLY_ACK: | 679 | connector->base.kdev->kobj.name); |
759 | if (mode == MODE_I2C_READ) { | ||
760 | *read_byte = reply[1]; | ||
761 | } | ||
762 | ret = reply_bytes - 1; | ||
763 | goto out; | ||
764 | case DP_AUX_I2C_REPLY_NACK: | ||
765 | DRM_DEBUG_KMS("aux_i2c nack\n"); | ||
766 | ret = -EREMOTEIO; | ||
767 | goto out; | ||
768 | case DP_AUX_I2C_REPLY_DEFER: | ||
769 | DRM_DEBUG_KMS("aux_i2c defer\n"); | ||
770 | udelay(100); | ||
771 | break; | ||
772 | default: | ||
773 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); | ||
774 | ret = -EREMOTEIO; | ||
775 | goto out; | ||
776 | } | ||
777 | } | ||
778 | 680 | ||
779 | DRM_ERROR("too many retries, giving up\n"); | 681 | ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux); |
780 | ret = -EREMOTEIO; | 682 | if (ret < 0) { |
683 | DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n", | ||
684 | name, ret); | ||
685 | return; | ||
686 | } | ||
781 | 687 | ||
782 | out: | 688 | ret = sysfs_create_link(&connector->base.kdev->kobj, |
783 | edp_panel_vdd_off(intel_dp, false); | 689 | &intel_dp->aux.ddc.dev.kobj, |
784 | return ret; | 690 | intel_dp->aux.ddc.dev.kobj.name); |
691 | if (ret < 0) { | ||
692 | DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); | ||
693 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); | ||
694 | } | ||
785 | } | 695 | } |
786 | 696 | ||
787 | static void | 697 | static void |
@@ -790,43 +700,10 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) | |||
790 | struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); | 700 | struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); |
791 | 701 | ||
792 | sysfs_remove_link(&intel_connector->base.kdev->kobj, | 702 | sysfs_remove_link(&intel_connector->base.kdev->kobj, |
793 | intel_dp->adapter.dev.kobj.name); | 703 | intel_dp->aux.ddc.dev.kobj.name); |
794 | intel_connector_unregister(intel_connector); | 704 | intel_connector_unregister(intel_connector); |
795 | } | 705 | } |
796 | 706 | ||
797 | static int | ||
798 | intel_dp_i2c_init(struct intel_dp *intel_dp, | ||
799 | struct intel_connector *intel_connector, const char *name) | ||
800 | { | ||
801 | int ret; | ||
802 | |||
803 | DRM_DEBUG_KMS("i2c_init %s\n", name); | ||
804 | intel_dp->algo.running = false; | ||
805 | intel_dp->algo.address = 0; | ||
806 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; | ||
807 | |||
808 | memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); | ||
809 | intel_dp->adapter.owner = THIS_MODULE; | ||
810 | intel_dp->adapter.class = I2C_CLASS_DDC; | ||
811 | strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); | ||
812 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; | ||
813 | intel_dp->adapter.algo_data = &intel_dp->algo; | ||
814 | intel_dp->adapter.dev.parent = intel_connector->base.dev->dev; | ||
815 | |||
816 | ret = i2c_dp_aux_add_bus(&intel_dp->adapter); | ||
817 | if (ret < 0) | ||
818 | return ret; | ||
819 | |||
820 | ret = sysfs_create_link(&intel_connector->base.kdev->kobj, | ||
821 | &intel_dp->adapter.dev.kobj, | ||
822 | intel_dp->adapter.dev.kobj.name); | ||
823 | |||
824 | if (ret < 0) | ||
825 | i2c_del_adapter(&intel_dp->adapter); | ||
826 | |||
827 | return ret; | ||
828 | } | ||
829 | |||
830 | static void | 707 | static void |
831 | intel_dp_set_clock(struct intel_encoder *encoder, | 708 | intel_dp_set_clock(struct intel_encoder *encoder, |
832 | struct intel_crtc_config *pipe_config, int link_bw) | 709 | struct intel_crtc_config *pipe_config, int link_bw) |
@@ -1162,23 +1039,21 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) | |||
1162 | return control; | 1039 | return control; |
1163 | } | 1040 | } |
1164 | 1041 | ||
1165 | static void edp_panel_vdd_on(struct intel_dp *intel_dp) | 1042 | static bool _edp_panel_vdd_on(struct intel_dp *intel_dp) |
1166 | { | 1043 | { |
1167 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1044 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1168 | struct drm_i915_private *dev_priv = dev->dev_private; | 1045 | struct drm_i915_private *dev_priv = dev->dev_private; |
1169 | u32 pp; | 1046 | u32 pp; |
1170 | u32 pp_stat_reg, pp_ctrl_reg; | 1047 | u32 pp_stat_reg, pp_ctrl_reg; |
1048 | bool need_to_disable = !intel_dp->want_panel_vdd; | ||
1171 | 1049 | ||
1172 | if (!is_edp(intel_dp)) | 1050 | if (!is_edp(intel_dp)) |
1173 | return; | 1051 | return false; |
1174 | |||
1175 | WARN(intel_dp->want_panel_vdd, | ||
1176 | "eDP VDD already requested on\n"); | ||
1177 | 1052 | ||
1178 | intel_dp->want_panel_vdd = true; | 1053 | intel_dp->want_panel_vdd = true; |
1179 | 1054 | ||
1180 | if (edp_have_panel_vdd(intel_dp)) | 1055 | if (edp_have_panel_vdd(intel_dp)) |
1181 | return; | 1056 | return need_to_disable; |
1182 | 1057 | ||
1183 | intel_runtime_pm_get(dev_priv); | 1058 | intel_runtime_pm_get(dev_priv); |
1184 | 1059 | ||
@@ -1204,6 +1079,17 @@ static void edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
1204 | DRM_DEBUG_KMS("eDP was not running\n"); | 1079 | DRM_DEBUG_KMS("eDP was not running\n"); |
1205 | msleep(intel_dp->panel_power_up_delay); | 1080 | msleep(intel_dp->panel_power_up_delay); |
1206 | } | 1081 | } |
1082 | |||
1083 | return need_to_disable; | ||
1084 | } | ||
1085 | |||
1086 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) | ||
1087 | { | ||
1088 | if (is_edp(intel_dp)) { | ||
1089 | bool vdd = _edp_panel_vdd_on(intel_dp); | ||
1090 | |||
1091 | WARN(!vdd, "eDP VDD already requested on\n"); | ||
1092 | } | ||
1207 | } | 1093 | } |
1208 | 1094 | ||
1209 | static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) | 1095 | static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) |
@@ -1330,6 +1216,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) | |||
1330 | 1216 | ||
1331 | edp_wait_backlight_off(intel_dp); | 1217 | edp_wait_backlight_off(intel_dp); |
1332 | 1218 | ||
1219 | WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); | ||
1220 | |||
1333 | pp = ironlake_get_pp_control(intel_dp); | 1221 | pp = ironlake_get_pp_control(intel_dp); |
1334 | /* We need to switch off panel power _and_ force vdd, for otherwise some | 1222 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
1335 | * panels get very unhappy and cease to work. */ | 1223 | * panels get very unhappy and cease to work. */ |
@@ -1338,11 +1226,16 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) | |||
1338 | 1226 | ||
1339 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); | 1227 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); |
1340 | 1228 | ||
1229 | intel_dp->want_panel_vdd = false; | ||
1230 | |||
1341 | I915_WRITE(pp_ctrl_reg, pp); | 1231 | I915_WRITE(pp_ctrl_reg, pp); |
1342 | POSTING_READ(pp_ctrl_reg); | 1232 | POSTING_READ(pp_ctrl_reg); |
1343 | 1233 | ||
1344 | intel_dp->last_power_cycle = jiffies; | 1234 | intel_dp->last_power_cycle = jiffies; |
1345 | wait_panel_off(intel_dp); | 1235 | wait_panel_off(intel_dp); |
1236 | |||
1237 | /* We got a reference when we enabled the VDD. */ | ||
1238 | intel_runtime_pm_put(dev_priv); | ||
1346 | } | 1239 | } |
1347 | 1240 | ||
1348 | void intel_edp_backlight_on(struct intel_dp *intel_dp) | 1241 | void intel_edp_backlight_on(struct intel_dp *intel_dp) |
@@ -1459,8 +1352,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
1459 | return; | 1352 | return; |
1460 | 1353 | ||
1461 | if (mode != DRM_MODE_DPMS_ON) { | 1354 | if (mode != DRM_MODE_DPMS_ON) { |
1462 | ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, | 1355 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
1463 | DP_SET_POWER_D3); | 1356 | DP_SET_POWER_D3); |
1464 | if (ret != 1) | 1357 | if (ret != 1) |
1465 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); | 1358 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); |
1466 | } else { | 1359 | } else { |
@@ -1469,9 +1362,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
1469 | * time to wake up. | 1362 | * time to wake up. |
1470 | */ | 1363 | */ |
1471 | for (i = 0; i < 3; i++) { | 1364 | for (i = 0; i < 3; i++) { |
1472 | ret = intel_dp_aux_native_write_1(intel_dp, | 1365 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
1473 | DP_SET_POWER, | 1366 | DP_SET_POWER_D0); |
1474 | DP_SET_POWER_D0); | ||
1475 | if (ret == 1) | 1367 | if (ret == 1) |
1476 | break; | 1368 | break; |
1477 | msleep(1); | 1369 | msleep(1); |
@@ -1695,13 +1587,11 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) | |||
1695 | 1587 | ||
1696 | /* Enable PSR in sink */ | 1588 | /* Enable PSR in sink */ |
1697 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) | 1589 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) |
1698 | intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, | 1590 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
1699 | DP_PSR_ENABLE & | 1591 | DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); |
1700 | ~DP_PSR_MAIN_LINK_ACTIVE); | ||
1701 | else | 1592 | else |
1702 | intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, | 1593 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
1703 | DP_PSR_ENABLE | | 1594 | DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); |
1704 | DP_PSR_MAIN_LINK_ACTIVE); | ||
1705 | 1595 | ||
1706 | /* Setup AUX registers */ | 1596 | /* Setup AUX registers */ |
1707 | I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); | 1597 | I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); |
@@ -1876,11 +1766,10 @@ static void intel_disable_dp(struct intel_encoder *encoder) | |||
1876 | 1766 | ||
1877 | /* Make sure the panel is off before trying to change the mode. But also | 1767 | /* Make sure the panel is off before trying to change the mode. But also |
1878 | * ensure that we have vdd while we switch off the panel. */ | 1768 | * ensure that we have vdd while we switch off the panel. */ |
1879 | edp_panel_vdd_on(intel_dp); | 1769 | intel_edp_panel_vdd_on(intel_dp); |
1880 | intel_edp_backlight_off(intel_dp); | 1770 | intel_edp_backlight_off(intel_dp); |
1881 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1771 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
1882 | intel_edp_panel_off(intel_dp); | 1772 | intel_edp_panel_off(intel_dp); |
1883 | edp_panel_vdd_off(intel_dp, true); | ||
1884 | 1773 | ||
1885 | /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ | 1774 | /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ |
1886 | if (!(port == PORT_A || IS_VALLEYVIEW(dev))) | 1775 | if (!(port == PORT_A || IS_VALLEYVIEW(dev))) |
@@ -1910,7 +1799,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
1910 | if (WARN_ON(dp_reg & DP_PORT_EN)) | 1799 | if (WARN_ON(dp_reg & DP_PORT_EN)) |
1911 | return; | 1800 | return; |
1912 | 1801 | ||
1913 | edp_panel_vdd_on(intel_dp); | 1802 | intel_edp_panel_vdd_on(intel_dp); |
1914 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 1803 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
1915 | intel_dp_start_link_train(intel_dp); | 1804 | intel_dp_start_link_train(intel_dp); |
1916 | intel_edp_panel_on(intel_dp); | 1805 | intel_edp_panel_on(intel_dp); |
@@ -2013,26 +1902,25 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) | |||
2013 | /* | 1902 | /* |
2014 | * Native read with retry for link status and receiver capability reads for | 1903 | * Native read with retry for link status and receiver capability reads for |
2015 | * cases where the sink may still be asleep. | 1904 | * cases where the sink may still be asleep. |
1905 | * | ||
1906 | * Sinks are *supposed* to come up within 1ms from an off state, but we're also | ||
1907 | * supposed to retry 3 times per the spec. | ||
2016 | */ | 1908 | */ |
2017 | static bool | 1909 | static ssize_t |
2018 | intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, | 1910 | intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, |
2019 | uint8_t *recv, int recv_bytes) | 1911 | void *buffer, size_t size) |
2020 | { | 1912 | { |
2021 | int ret, i; | 1913 | ssize_t ret; |
1914 | int i; | ||
2022 | 1915 | ||
2023 | /* | ||
2024 | * Sinks are *supposed* to come up within 1ms from an off state, | ||
2025 | * but we're also supposed to retry 3 times per the spec. | ||
2026 | */ | ||
2027 | for (i = 0; i < 3; i++) { | 1916 | for (i = 0; i < 3; i++) { |
2028 | ret = intel_dp_aux_native_read(intel_dp, address, recv, | 1917 | ret = drm_dp_dpcd_read(aux, offset, buffer, size); |
2029 | recv_bytes); | 1918 | if (ret == size) |
2030 | if (ret == recv_bytes) | 1919 | return ret; |
2031 | return true; | ||
2032 | msleep(1); | 1920 | msleep(1); |
2033 | } | 1921 | } |
2034 | 1922 | ||
2035 | return false; | 1923 | return ret; |
2036 | } | 1924 | } |
2037 | 1925 | ||
2038 | /* | 1926 | /* |
@@ -2042,10 +1930,10 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, | |||
2042 | static bool | 1930 | static bool |
2043 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) | 1931 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
2044 | { | 1932 | { |
2045 | return intel_dp_aux_native_read_retry(intel_dp, | 1933 | return intel_dp_dpcd_read_wake(&intel_dp->aux, |
2046 | DP_LANE0_1_STATUS, | 1934 | DP_LANE0_1_STATUS, |
2047 | link_status, | 1935 | link_status, |
2048 | DP_LINK_STATUS_SIZE); | 1936 | DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; |
2049 | } | 1937 | } |
2050 | 1938 | ||
2051 | /* | 1939 | /* |
@@ -2559,8 +2447,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
2559 | len = intel_dp->lane_count + 1; | 2447 | len = intel_dp->lane_count + 1; |
2560 | } | 2448 | } |
2561 | 2449 | ||
2562 | ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, | 2450 | ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, |
2563 | buf, len); | 2451 | buf, len); |
2564 | 2452 | ||
2565 | return ret == len; | 2453 | return ret == len; |
2566 | } | 2454 | } |
@@ -2589,9 +2477,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, | |||
2589 | I915_WRITE(intel_dp->output_reg, *DP); | 2477 | I915_WRITE(intel_dp->output_reg, *DP); |
2590 | POSTING_READ(intel_dp->output_reg); | 2478 | POSTING_READ(intel_dp->output_reg); |
2591 | 2479 | ||
2592 | ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, | 2480 | ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, |
2593 | intel_dp->train_set, | 2481 | intel_dp->train_set, intel_dp->lane_count); |
2594 | intel_dp->lane_count); | ||
2595 | 2482 | ||
2596 | return ret == intel_dp->lane_count; | 2483 | return ret == intel_dp->lane_count; |
2597 | } | 2484 | } |
@@ -2647,11 +2534,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
2647 | link_config[1] = intel_dp->lane_count; | 2534 | link_config[1] = intel_dp->lane_count; |
2648 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) | 2535 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
2649 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 2536 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
2650 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2); | 2537 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); |
2651 | 2538 | ||
2652 | link_config[0] = 0; | 2539 | link_config[0] = 0; |
2653 | link_config[1] = DP_SET_ANSI_8B10B; | 2540 | link_config[1] = DP_SET_ANSI_8B10B; |
2654 | intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2); | 2541 | drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); |
2655 | 2542 | ||
2656 | DP |= DP_PORT_EN; | 2543 | DP |= DP_PORT_EN; |
2657 | 2544 | ||
@@ -2894,8 +2781,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2894 | 2781 | ||
2895 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; | 2782 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; |
2896 | 2783 | ||
2897 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, | 2784 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, |
2898 | sizeof(intel_dp->dpcd)) == 0) | 2785 | sizeof(intel_dp->dpcd)) < 0) |
2899 | return false; /* aux transfer failed */ | 2786 | return false; /* aux transfer failed */ |
2900 | 2787 | ||
2901 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), | 2788 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
@@ -2908,9 +2795,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2908 | /* Check if the panel supports PSR */ | 2795 | /* Check if the panel supports PSR */ |
2909 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); | 2796 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); |
2910 | if (is_edp(intel_dp)) { | 2797 | if (is_edp(intel_dp)) { |
2911 | intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, | 2798 | intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT, |
2912 | intel_dp->psr_dpcd, | 2799 | intel_dp->psr_dpcd, |
2913 | sizeof(intel_dp->psr_dpcd)); | 2800 | sizeof(intel_dp->psr_dpcd)); |
2914 | if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { | 2801 | if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { |
2915 | dev_priv->psr.sink_support = true; | 2802 | dev_priv->psr.sink_support = true; |
2916 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); | 2803 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); |
@@ -2932,9 +2819,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2932 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) | 2819 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) |
2933 | return true; /* no per-port downstream info */ | 2820 | return true; /* no per-port downstream info */ |
2934 | 2821 | ||
2935 | if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, | 2822 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, |
2936 | intel_dp->downstream_ports, | 2823 | intel_dp->downstream_ports, |
2937 | DP_MAX_DOWNSTREAM_PORTS) == 0) | 2824 | DP_MAX_DOWNSTREAM_PORTS) < 0) |
2938 | return false; /* downstream port status fetch failed */ | 2825 | return false; /* downstream port status fetch failed */ |
2939 | 2826 | ||
2940 | return true; | 2827 | return true; |
@@ -2948,13 +2835,13 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) | |||
2948 | if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) | 2835 | if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
2949 | return; | 2836 | return; |
2950 | 2837 | ||
2951 | edp_panel_vdd_on(intel_dp); | 2838 | intel_edp_panel_vdd_on(intel_dp); |
2952 | 2839 | ||
2953 | if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) | 2840 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3) |
2954 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", | 2841 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
2955 | buf[0], buf[1], buf[2]); | 2842 | buf[0], buf[1], buf[2]); |
2956 | 2843 | ||
2957 | if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) | 2844 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3) |
2958 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", | 2845 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
2959 | buf[0], buf[1], buf[2]); | 2846 | buf[0], buf[1], buf[2]); |
2960 | 2847 | ||
@@ -2969,46 +2856,40 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) | |||
2969 | to_intel_crtc(intel_dig_port->base.base.crtc); | 2856 | to_intel_crtc(intel_dig_port->base.base.crtc); |
2970 | u8 buf[1]; | 2857 | u8 buf[1]; |
2971 | 2858 | ||
2972 | if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1)) | 2859 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) |
2973 | return -EAGAIN; | 2860 | return -EAGAIN; |
2974 | 2861 | ||
2975 | if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) | 2862 | if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) |
2976 | return -ENOTTY; | 2863 | return -ENOTTY; |
2977 | 2864 | ||
2978 | if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, | 2865 | if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, |
2979 | DP_TEST_SINK_START)) | 2866 | DP_TEST_SINK_START) < 0) |
2980 | return -EAGAIN; | 2867 | return -EAGAIN; |
2981 | 2868 | ||
2982 | /* Wait 2 vblanks to be sure we will have the correct CRC value */ | 2869 | /* Wait 2 vblanks to be sure we will have the correct CRC value */ |
2983 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2870 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2984 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2871 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2985 | 2872 | ||
2986 | if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6)) | 2873 | if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) |
2987 | return -EAGAIN; | 2874 | return -EAGAIN; |
2988 | 2875 | ||
2989 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0); | 2876 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); |
2990 | return 0; | 2877 | return 0; |
2991 | } | 2878 | } |
2992 | 2879 | ||
2993 | static bool | 2880 | static bool |
2994 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) | 2881 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) |
2995 | { | 2882 | { |
2996 | int ret; | 2883 | return intel_dp_dpcd_read_wake(&intel_dp->aux, |
2997 | 2884 | DP_DEVICE_SERVICE_IRQ_VECTOR, | |
2998 | ret = intel_dp_aux_native_read_retry(intel_dp, | 2885 | sink_irq_vector, 1) == 1; |
2999 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
3000 | sink_irq_vector, 1); | ||
3001 | if (!ret) | ||
3002 | return false; | ||
3003 | |||
3004 | return true; | ||
3005 | } | 2886 | } |
3006 | 2887 | ||
3007 | static void | 2888 | static void |
3008 | intel_dp_handle_test_request(struct intel_dp *intel_dp) | 2889 | intel_dp_handle_test_request(struct intel_dp *intel_dp) |
3009 | { | 2890 | { |
3010 | /* NAK by default */ | 2891 | /* NAK by default */ |
3011 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); | 2892 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK); |
3012 | } | 2893 | } |
3013 | 2894 | ||
3014 | /* | 2895 | /* |
@@ -3047,9 +2928,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
3047 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | 2928 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
3048 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { | 2929 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { |
3049 | /* Clear interrupt source */ | 2930 | /* Clear interrupt source */ |
3050 | intel_dp_aux_native_write_1(intel_dp, | 2931 | drm_dp_dpcd_writeb(&intel_dp->aux, |
3051 | DP_DEVICE_SERVICE_IRQ_VECTOR, | 2932 | DP_DEVICE_SERVICE_IRQ_VECTOR, |
3052 | sink_irq_vector); | 2933 | sink_irq_vector); |
3053 | 2934 | ||
3054 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) | 2935 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) |
3055 | intel_dp_handle_test_request(intel_dp); | 2936 | intel_dp_handle_test_request(intel_dp); |
@@ -3084,15 +2965,17 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
3084 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | 2965 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
3085 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { | 2966 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { |
3086 | uint8_t reg; | 2967 | uint8_t reg; |
3087 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, | 2968 | |
3088 | ®, 1)) | 2969 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, |
2970 | ®, 1) < 0) | ||
3089 | return connector_status_unknown; | 2971 | return connector_status_unknown; |
2972 | |||
3090 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected | 2973 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected |
3091 | : connector_status_disconnected; | 2974 | : connector_status_disconnected; |
3092 | } | 2975 | } |
3093 | 2976 | ||
3094 | /* If no HPD, poke DDC gently */ | 2977 | /* If no HPD, poke DDC gently */ |
3095 | if (drm_probe_ddc(&intel_dp->adapter)) | 2978 | if (drm_probe_ddc(&intel_dp->aux.ddc)) |
3096 | return connector_status_connected; | 2979 | return connector_status_connected; |
3097 | 2980 | ||
3098 | /* Well we tried, say unknown for unreliable port types */ | 2981 | /* Well we tried, say unknown for unreliable port types */ |
@@ -3260,7 +3143,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
3260 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { | 3143 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { |
3261 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); | 3144 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); |
3262 | } else { | 3145 | } else { |
3263 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 3146 | edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); |
3264 | if (edid) { | 3147 | if (edid) { |
3265 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | 3148 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
3266 | kfree(edid); | 3149 | kfree(edid); |
@@ -3296,7 +3179,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
3296 | power_domain = intel_display_port_power_domain(intel_encoder); | 3179 | power_domain = intel_display_port_power_domain(intel_encoder); |
3297 | intel_display_power_get(dev_priv, power_domain); | 3180 | intel_display_power_get(dev_priv, power_domain); |
3298 | 3181 | ||
3299 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); | 3182 | ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); |
3300 | intel_display_power_put(dev_priv, power_domain); | 3183 | intel_display_power_put(dev_priv, power_domain); |
3301 | if (ret) | 3184 | if (ret) |
3302 | return ret; | 3185 | return ret; |
@@ -3329,7 +3212,7 @@ intel_dp_detect_audio(struct drm_connector *connector) | |||
3329 | power_domain = intel_display_port_power_domain(intel_encoder); | 3212 | power_domain = intel_display_port_power_domain(intel_encoder); |
3330 | intel_display_power_get(dev_priv, power_domain); | 3213 | intel_display_power_get(dev_priv, power_domain); |
3331 | 3214 | ||
3332 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 3215 | edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); |
3333 | if (edid) { | 3216 | if (edid) { |
3334 | has_audio = drm_detect_monitor_audio(edid); | 3217 | has_audio = drm_detect_monitor_audio(edid); |
3335 | kfree(edid); | 3218 | kfree(edid); |
@@ -3451,7 +3334,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
3451 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 3334 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
3452 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 3335 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
3453 | 3336 | ||
3454 | i2c_del_adapter(&intel_dp->adapter); | 3337 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); |
3455 | drm_encoder_cleanup(encoder); | 3338 | drm_encoder_cleanup(encoder); |
3456 | if (is_edp(intel_dp)) { | 3339 | if (is_edp(intel_dp)) { |
3457 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 3340 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
@@ -3745,7 +3628,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
3745 | return true; | 3628 | return true; |
3746 | 3629 | ||
3747 | /* Cache DPCD and EDID for edp. */ | 3630 | /* Cache DPCD and EDID for edp. */ |
3748 | edp_panel_vdd_on(intel_dp); | 3631 | intel_edp_panel_vdd_on(intel_dp); |
3749 | has_dpcd = intel_dp_get_dpcd(intel_dp); | 3632 | has_dpcd = intel_dp_get_dpcd(intel_dp); |
3750 | edp_panel_vdd_off(intel_dp, false); | 3633 | edp_panel_vdd_off(intel_dp, false); |
3751 | 3634 | ||
@@ -3764,7 +3647,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
3764 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); | 3647 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); |
3765 | 3648 | ||
3766 | mutex_lock(&dev->mode_config.mutex); | 3649 | mutex_lock(&dev->mode_config.mutex); |
3767 | edid = drm_get_edid(connector, &intel_dp->adapter); | 3650 | edid = drm_get_edid(connector, &intel_dp->aux.ddc); |
3768 | if (edid) { | 3651 | if (edid) { |
3769 | if (drm_add_edid_modes(connector, edid)) { | 3652 | if (drm_add_edid_modes(connector, edid)) { |
3770 | drm_mode_connector_update_edid_property(connector, | 3653 | drm_mode_connector_update_edid_property(connector, |
@@ -3813,8 +3696,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3813 | struct drm_i915_private *dev_priv = dev->dev_private; | 3696 | struct drm_i915_private *dev_priv = dev->dev_private; |
3814 | enum port port = intel_dig_port->port; | 3697 | enum port port = intel_dig_port->port; |
3815 | struct edp_power_seq power_seq = { 0 }; | 3698 | struct edp_power_seq power_seq = { 0 }; |
3816 | const char *name = NULL; | 3699 | int type; |
3817 | int type, error; | ||
3818 | 3700 | ||
3819 | /* intel_dp vfuncs */ | 3701 | /* intel_dp vfuncs */ |
3820 | if (IS_VALLEYVIEW(dev)) | 3702 | if (IS_VALLEYVIEW(dev)) |
@@ -3867,43 +3749,19 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3867 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 3749 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
3868 | intel_connector->unregister = intel_dp_connector_unregister; | 3750 | intel_connector->unregister = intel_dp_connector_unregister; |
3869 | 3751 | ||
3870 | intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; | 3752 | /* Set up the hotplug pin. */ |
3871 | if (HAS_DDI(dev)) { | ||
3872 | switch (intel_dig_port->port) { | ||
3873 | case PORT_A: | ||
3874 | intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; | ||
3875 | break; | ||
3876 | case PORT_B: | ||
3877 | intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; | ||
3878 | break; | ||
3879 | case PORT_C: | ||
3880 | intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; | ||
3881 | break; | ||
3882 | case PORT_D: | ||
3883 | intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; | ||
3884 | break; | ||
3885 | default: | ||
3886 | BUG(); | ||
3887 | } | ||
3888 | } | ||
3889 | |||
3890 | /* Set up the DDC bus. */ | ||
3891 | switch (port) { | 3753 | switch (port) { |
3892 | case PORT_A: | 3754 | case PORT_A: |
3893 | intel_encoder->hpd_pin = HPD_PORT_A; | 3755 | intel_encoder->hpd_pin = HPD_PORT_A; |
3894 | name = "DPDDC-A"; | ||
3895 | break; | 3756 | break; |
3896 | case PORT_B: | 3757 | case PORT_B: |
3897 | intel_encoder->hpd_pin = HPD_PORT_B; | 3758 | intel_encoder->hpd_pin = HPD_PORT_B; |
3898 | name = "DPDDC-B"; | ||
3899 | break; | 3759 | break; |
3900 | case PORT_C: | 3760 | case PORT_C: |
3901 | intel_encoder->hpd_pin = HPD_PORT_C; | 3761 | intel_encoder->hpd_pin = HPD_PORT_C; |
3902 | name = "DPDDC-C"; | ||
3903 | break; | 3762 | break; |
3904 | case PORT_D: | 3763 | case PORT_D: |
3905 | intel_encoder->hpd_pin = HPD_PORT_D; | 3764 | intel_encoder->hpd_pin = HPD_PORT_D; |
3906 | name = "DPDDC-D"; | ||
3907 | break; | 3765 | break; |
3908 | default: | 3766 | default: |
3909 | BUG(); | 3767 | BUG(); |
@@ -3914,14 +3772,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3914 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); | 3772 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
3915 | } | 3773 | } |
3916 | 3774 | ||
3917 | error = intel_dp_i2c_init(intel_dp, intel_connector, name); | 3775 | intel_dp_aux_init(intel_dp, intel_connector); |
3918 | WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", | ||
3919 | error, port_name(port)); | ||
3920 | 3776 | ||
3921 | intel_dp->psr_setup_done = false; | 3777 | intel_dp->psr_setup_done = false; |
3922 | 3778 | ||
3923 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { | 3779 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { |
3924 | i2c_del_adapter(&intel_dp->adapter); | 3780 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); |
3925 | if (is_edp(intel_dp)) { | 3781 | if (is_edp(intel_dp)) { |
3926 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 3782 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
3927 | mutex_lock(&dev->mode_config.mutex); | 3783 | mutex_lock(&dev->mode_config.mutex); |
@@ -3991,7 +3847,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
3991 | 3847 | ||
3992 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | 3848 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
3993 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 3849 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
3994 | intel_encoder->cloneable = false; | 3850 | intel_encoder->cloneable = 0; |
3995 | intel_encoder->hot_plug = intel_dp_hot_plug; | 3851 | intel_encoder->hot_plug = intel_dp_hot_plug; |
3996 | 3852 | ||
3997 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { | 3853 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9c7090590776..fa9910481ab0 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -78,6 +78,12 @@ | |||
78 | #define MAX_OUTPUTS 6 | 78 | #define MAX_OUTPUTS 6 |
79 | /* maximum connectors per crtcs in the mode set */ | 79 | /* maximum connectors per crtcs in the mode set */ |
80 | 80 | ||
81 | /* Maximum cursor sizes */ | ||
82 | #define GEN2_CURSOR_WIDTH 64 | ||
83 | #define GEN2_CURSOR_HEIGHT 64 | ||
84 | #define CURSOR_WIDTH 256 | ||
85 | #define CURSOR_HEIGHT 256 | ||
86 | |||
81 | #define INTEL_I2C_BUS_DVO 1 | 87 | #define INTEL_I2C_BUS_DVO 1 |
82 | #define INTEL_I2C_BUS_SDVO 2 | 88 | #define INTEL_I2C_BUS_SDVO 2 |
83 | 89 | ||
@@ -113,6 +119,7 @@ struct intel_fbdev { | |||
113 | struct intel_framebuffer *fb; | 119 | struct intel_framebuffer *fb; |
114 | struct list_head fbdev_list; | 120 | struct list_head fbdev_list; |
115 | struct drm_display_mode *our_mode; | 121 | struct drm_display_mode *our_mode; |
122 | int preferred_bpp; | ||
116 | }; | 123 | }; |
117 | 124 | ||
118 | struct intel_encoder { | 125 | struct intel_encoder { |
@@ -124,11 +131,7 @@ struct intel_encoder { | |||
124 | struct intel_crtc *new_crtc; | 131 | struct intel_crtc *new_crtc; |
125 | 132 | ||
126 | int type; | 133 | int type; |
127 | /* | 134 | unsigned int cloneable; |
128 | * Intel hw has only one MUX where encoders could be clone, hence a | ||
129 | * simple flag is enough to compute the possible_clones mask. | ||
130 | */ | ||
131 | bool cloneable; | ||
132 | bool connectors_active; | 135 | bool connectors_active; |
133 | void (*hot_plug)(struct intel_encoder *); | 136 | void (*hot_plug)(struct intel_encoder *); |
134 | bool (*compute_config)(struct intel_encoder *, | 137 | bool (*compute_config)(struct intel_encoder *, |
@@ -218,6 +221,12 @@ typedef struct dpll { | |||
218 | int p; | 221 | int p; |
219 | } intel_clock_t; | 222 | } intel_clock_t; |
220 | 223 | ||
224 | struct intel_plane_config { | ||
225 | bool tiled; | ||
226 | int size; | ||
227 | u32 base; | ||
228 | }; | ||
229 | |||
221 | struct intel_crtc_config { | 230 | struct intel_crtc_config { |
222 | /** | 231 | /** |
223 | * quirks - bitfield with hw state readout quirks | 232 | * quirks - bitfield with hw state readout quirks |
@@ -364,8 +373,10 @@ struct intel_crtc { | |||
364 | uint32_t cursor_addr; | 373 | uint32_t cursor_addr; |
365 | int16_t cursor_x, cursor_y; | 374 | int16_t cursor_x, cursor_y; |
366 | int16_t cursor_width, cursor_height; | 375 | int16_t cursor_width, cursor_height; |
376 | int16_t max_cursor_width, max_cursor_height; | ||
367 | bool cursor_visible; | 377 | bool cursor_visible; |
368 | 378 | ||
379 | struct intel_plane_config plane_config; | ||
369 | struct intel_crtc_config config; | 380 | struct intel_crtc_config config; |
370 | struct intel_crtc_config *new_config; | 381 | struct intel_crtc_config *new_config; |
371 | bool new_enabled; | 382 | bool new_enabled; |
@@ -485,8 +496,7 @@ struct intel_dp { | |||
485 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 496 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
486 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; | 497 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; |
487 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | 498 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; |
488 | struct i2c_adapter adapter; | 499 | struct drm_dp_aux aux; |
489 | struct i2c_algo_dp_aux_data algo; | ||
490 | uint8_t train_set[4]; | 500 | uint8_t train_set[4]; |
491 | int panel_power_up_delay; | 501 | int panel_power_up_delay; |
492 | int panel_power_down_delay; | 502 | int panel_power_down_delay; |
@@ -618,8 +628,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); | |||
618 | void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); | 628 | void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); |
619 | void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); | 629 | void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); |
620 | void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); | 630 | void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); |
621 | void hsw_pc8_disable_interrupts(struct drm_device *dev); | 631 | void hsw_runtime_pm_disable_interrupts(struct drm_device *dev); |
622 | void hsw_pc8_restore_interrupts(struct drm_device *dev); | 632 | void hsw_runtime_pm_restore_interrupts(struct drm_device *dev); |
623 | 633 | ||
624 | 634 | ||
625 | /* intel_crt.c */ | 635 | /* intel_crt.c */ |
@@ -722,9 +732,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, | |||
722 | unsigned int bpp, | 732 | unsigned int bpp, |
723 | unsigned int pitch); | 733 | unsigned int pitch); |
724 | void intel_display_handle_reset(struct drm_device *dev); | 734 | void intel_display_handle_reset(struct drm_device *dev); |
725 | void hsw_enable_pc8_work(struct work_struct *__work); | 735 | void hsw_enable_pc8(struct drm_i915_private *dev_priv); |
726 | void hsw_enable_package_c8(struct drm_i915_private *dev_priv); | 736 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); |
727 | void hsw_disable_package_c8(struct drm_i915_private *dev_priv); | ||
728 | void intel_dp_get_m_n(struct intel_crtc *crtc, | 737 | void intel_dp_get_m_n(struct intel_crtc *crtc, |
729 | struct intel_crtc_config *pipe_config); | 738 | struct intel_crtc_config *pipe_config); |
730 | int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); | 739 | int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); |
@@ -740,6 +749,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder); | |||
740 | int valleyview_get_vco(struct drm_i915_private *dev_priv); | 749 | int valleyview_get_vco(struct drm_i915_private *dev_priv); |
741 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, | 750 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, |
742 | struct intel_crtc_config *pipe_config); | 751 | struct intel_crtc_config *pipe_config); |
752 | int intel_format_to_fourcc(int format); | ||
743 | 753 | ||
744 | /* intel_dp.c */ | 754 | /* intel_dp.c */ |
745 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); | 755 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); |
@@ -757,6 +767,7 @@ bool intel_dp_compute_config(struct intel_encoder *encoder, | |||
757 | bool intel_dp_is_edp(struct drm_device *dev, enum port port); | 767 | bool intel_dp_is_edp(struct drm_device *dev, enum port port); |
758 | void intel_edp_backlight_on(struct intel_dp *intel_dp); | 768 | void intel_edp_backlight_on(struct intel_dp *intel_dp); |
759 | void intel_edp_backlight_off(struct intel_dp *intel_dp); | 769 | void intel_edp_backlight_off(struct intel_dp *intel_dp); |
770 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); | ||
760 | void intel_edp_panel_on(struct intel_dp *intel_dp); | 771 | void intel_edp_panel_on(struct intel_dp *intel_dp); |
761 | void intel_edp_panel_off(struct intel_dp *intel_dp); | 772 | void intel_edp_panel_off(struct intel_dp *intel_dp); |
762 | void intel_edp_psr_enable(struct intel_dp *intel_dp); | 773 | void intel_edp_psr_enable(struct intel_dp *intel_dp); |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index cf7322e95278..33656647f8bc 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -620,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev) | |||
620 | intel_encoder->type = INTEL_OUTPUT_DSI; | 620 | intel_encoder->type = INTEL_OUTPUT_DSI; |
621 | intel_encoder->crtc_mask = (1 << 0); /* XXX */ | 621 | intel_encoder->crtc_mask = (1 << 0); /* XXX */ |
622 | 622 | ||
623 | intel_encoder->cloneable = false; | 623 | intel_encoder->cloneable = 0; |
624 | drm_connector_init(dev, connector, &intel_dsi_connector_funcs, | 624 | drm_connector_init(dev, connector, &intel_dsi_connector_funcs, |
625 | DRM_MODE_CONNECTOR_DSI); | 625 | DRM_MODE_CONNECTOR_DSI); |
626 | 626 | ||
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 86eeb8b7d435..7fe3feedfe03 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -522,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev) | |||
522 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 522 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
523 | switch (dvo->type) { | 523 | switch (dvo->type) { |
524 | case INTEL_DVO_CHIP_TMDS: | 524 | case INTEL_DVO_CHIP_TMDS: |
525 | intel_encoder->cloneable = true; | 525 | intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) | |
526 | (1 << INTEL_OUTPUT_DVO); | ||
526 | drm_connector_init(dev, connector, | 527 | drm_connector_init(dev, connector, |
527 | &intel_dvo_connector_funcs, | 528 | &intel_dvo_connector_funcs, |
528 | DRM_MODE_CONNECTOR_DVII); | 529 | DRM_MODE_CONNECTOR_DVII); |
529 | encoder_type = DRM_MODE_ENCODER_TMDS; | 530 | encoder_type = DRM_MODE_ENCODER_TMDS; |
530 | break; | 531 | break; |
531 | case INTEL_DVO_CHIP_LVDS: | 532 | case INTEL_DVO_CHIP_LVDS: |
532 | intel_encoder->cloneable = false; | 533 | intel_encoder->cloneable = 0; |
533 | drm_connector_init(dev, connector, | 534 | drm_connector_init(dev, connector, |
534 | &intel_dvo_connector_funcs, | 535 | &intel_dvo_connector_funcs, |
535 | DRM_MODE_CONNECTOR_LVDS); | 536 | DRM_MODE_CONNECTOR_LVDS); |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6b5beed28d70..2b1d42dbfe13 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -128,6 +128,7 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
128 | struct drm_framebuffer *fb; | 128 | struct drm_framebuffer *fb; |
129 | struct drm_i915_gem_object *obj; | 129 | struct drm_i915_gem_object *obj; |
130 | int size, ret; | 130 | int size, ret; |
131 | bool prealloc = false; | ||
131 | 132 | ||
132 | mutex_lock(&dev->struct_mutex); | 133 | mutex_lock(&dev->struct_mutex); |
133 | 134 | ||
@@ -139,6 +140,7 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
139 | intel_fb = ifbdev->fb; | 140 | intel_fb = ifbdev->fb; |
140 | } else { | 141 | } else { |
141 | DRM_DEBUG_KMS("re-using BIOS fb\n"); | 142 | DRM_DEBUG_KMS("re-using BIOS fb\n"); |
143 | prealloc = true; | ||
142 | sizes->fb_width = intel_fb->base.width; | 144 | sizes->fb_width = intel_fb->base.width; |
143 | sizes->fb_height = intel_fb->base.height; | 145 | sizes->fb_height = intel_fb->base.height; |
144 | } | 146 | } |
@@ -200,7 +202,7 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
200 | * If the object is stolen however, it will be full of whatever | 202 | * If the object is stolen however, it will be full of whatever |
201 | * garbage was left in there. | 203 | * garbage was left in there. |
202 | */ | 204 | */ |
203 | if (ifbdev->fb->obj->stolen) | 205 | if (ifbdev->fb->obj->stolen && !prealloc) |
204 | memset_io(info->screen_base, 0, info->screen_size); | 206 | memset_io(info->screen_base, 0, info->screen_size); |
205 | 207 | ||
206 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ | 208 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
@@ -454,27 +456,149 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
454 | drm_framebuffer_remove(&ifbdev->fb->base); | 456 | drm_framebuffer_remove(&ifbdev->fb->base); |
455 | } | 457 | } |
456 | 458 | ||
459 | /* | ||
460 | * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible. | ||
461 | * The core display code will have read out the current plane configuration, | ||
462 | * so we use that to figure out if there's an object for us to use as the | ||
463 | * fb, and if so, we re-use it for the fbdev configuration. | ||
464 | * | ||
465 | * Note we only support a single fb shared across pipes for boot (mostly for | ||
466 | * fbcon), so we just find the biggest and use that. | ||
467 | */ | ||
468 | static bool intel_fbdev_init_bios(struct drm_device *dev, | ||
469 | struct intel_fbdev *ifbdev) | ||
470 | { | ||
471 | struct intel_framebuffer *fb = NULL; | ||
472 | struct drm_crtc *crtc; | ||
473 | struct intel_crtc *intel_crtc; | ||
474 | struct intel_plane_config *plane_config = NULL; | ||
475 | unsigned int max_size = 0; | ||
476 | |||
477 | if (!i915.fastboot) | ||
478 | return false; | ||
479 | |||
480 | /* Find the largest fb */ | ||
481 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
482 | intel_crtc = to_intel_crtc(crtc); | ||
483 | |||
484 | if (!intel_crtc->active || !crtc->primary->fb) { | ||
485 | DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", | ||
486 | pipe_name(intel_crtc->pipe)); | ||
487 | continue; | ||
488 | } | ||
489 | |||
490 | if (intel_crtc->plane_config.size > max_size) { | ||
491 | DRM_DEBUG_KMS("found possible fb from plane %c\n", | ||
492 | pipe_name(intel_crtc->pipe)); | ||
493 | plane_config = &intel_crtc->plane_config; | ||
494 | fb = to_intel_framebuffer(crtc->primary->fb); | ||
495 | max_size = plane_config->size; | ||
496 | } | ||
497 | } | ||
498 | |||
499 | if (!fb) { | ||
500 | DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n"); | ||
501 | goto out; | ||
502 | } | ||
503 | |||
504 | /* Now make sure all the pipes will fit into it */ | ||
505 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
506 | unsigned int cur_size; | ||
507 | |||
508 | intel_crtc = to_intel_crtc(crtc); | ||
509 | |||
510 | if (!intel_crtc->active) { | ||
511 | DRM_DEBUG_KMS("pipe %c not active, skipping\n", | ||
512 | pipe_name(intel_crtc->pipe)); | ||
513 | continue; | ||
514 | } | ||
515 | |||
516 | DRM_DEBUG_KMS("checking plane %c for BIOS fb\n", | ||
517 | pipe_name(intel_crtc->pipe)); | ||
518 | |||
519 | /* | ||
520 | * See if the plane fb we found above will fit on this | ||
521 | * pipe. Note we need to use the selected fb's bpp rather | ||
522 | * than the current pipe's, since they could be different. | ||
523 | */ | ||
524 | cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay * | ||
525 | intel_crtc->config.adjusted_mode.crtc_vdisplay; | ||
526 | DRM_DEBUG_KMS("pipe %c area: %d\n", pipe_name(intel_crtc->pipe), | ||
527 | cur_size); | ||
528 | cur_size *= fb->base.bits_per_pixel / 8; | ||
529 | DRM_DEBUG_KMS("total size %d (bpp %d)\n", cur_size, | ||
530 | fb->base.bits_per_pixel / 8); | ||
531 | |||
532 | if (cur_size > max_size) { | ||
533 | DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", | ||
534 | pipe_name(intel_crtc->pipe), | ||
535 | cur_size, max_size); | ||
536 | plane_config = NULL; | ||
537 | fb = NULL; | ||
538 | break; | ||
539 | } | ||
540 | |||
541 | DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n", | ||
542 | pipe_name(intel_crtc->pipe), | ||
543 | max_size, cur_size); | ||
544 | } | ||
545 | |||
546 | if (!fb) { | ||
547 | DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n"); | ||
548 | goto out; | ||
549 | } | ||
550 | |||
551 | ifbdev->preferred_bpp = fb->base.bits_per_pixel; | ||
552 | ifbdev->fb = fb; | ||
553 | |||
554 | drm_framebuffer_reference(&ifbdev->fb->base); | ||
555 | |||
556 | /* Final pass to check if any active pipes don't have fbs */ | ||
557 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
558 | intel_crtc = to_intel_crtc(crtc); | ||
559 | |||
560 | if (!intel_crtc->active) | ||
561 | continue; | ||
562 | |||
563 | WARN(!crtc->primary->fb, | ||
564 | "re-used BIOS config but lost an fb on crtc %d\n", | ||
565 | crtc->base.id); | ||
566 | } | ||
567 | |||
568 | |||
569 | DRM_DEBUG_KMS("using BIOS fb for initial console\n"); | ||
570 | return true; | ||
571 | |||
572 | out: | ||
573 | |||
574 | return false; | ||
575 | } | ||
576 | |||
457 | int intel_fbdev_init(struct drm_device *dev) | 577 | int intel_fbdev_init(struct drm_device *dev) |
458 | { | 578 | { |
459 | struct intel_fbdev *ifbdev; | 579 | struct intel_fbdev *ifbdev; |
460 | struct drm_i915_private *dev_priv = dev->dev_private; | 580 | struct drm_i915_private *dev_priv = dev->dev_private; |
461 | int ret; | 581 | int ret; |
462 | 582 | ||
463 | ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL); | 583 | if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) |
464 | if (!ifbdev) | 584 | return -ENODEV; |
585 | |||
586 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); | ||
587 | if (ifbdev == NULL) | ||
465 | return -ENOMEM; | 588 | return -ENOMEM; |
466 | 589 | ||
467 | dev_priv->fbdev = ifbdev; | ||
468 | ifbdev->helper.funcs = &intel_fb_helper_funcs; | 590 | ifbdev->helper.funcs = &intel_fb_helper_funcs; |
591 | if (!intel_fbdev_init_bios(dev, ifbdev)) | ||
592 | ifbdev->preferred_bpp = 32; | ||
469 | 593 | ||
470 | ret = drm_fb_helper_init(dev, &ifbdev->helper, | 594 | ret = drm_fb_helper_init(dev, &ifbdev->helper, |
471 | INTEL_INFO(dev)->num_pipes, | 595 | INTEL_INFO(dev)->num_pipes, 4); |
472 | 4); | ||
473 | if (ret) { | 596 | if (ret) { |
474 | kfree(ifbdev); | 597 | kfree(ifbdev); |
475 | return ret; | 598 | return ret; |
476 | } | 599 | } |
477 | 600 | ||
601 | dev_priv->fbdev = ifbdev; | ||
478 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); | 602 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); |
479 | 603 | ||
480 | return 0; | 604 | return 0; |
@@ -483,9 +607,10 @@ int intel_fbdev_init(struct drm_device *dev) | |||
483 | void intel_fbdev_initial_config(struct drm_device *dev) | 607 | void intel_fbdev_initial_config(struct drm_device *dev) |
484 | { | 608 | { |
485 | struct drm_i915_private *dev_priv = dev->dev_private; | 609 | struct drm_i915_private *dev_priv = dev->dev_private; |
610 | struct intel_fbdev *ifbdev = dev_priv->fbdev; | ||
486 | 611 | ||
487 | /* Due to peculiar init order wrt to hpd handling this is separate. */ | 612 | /* Due to peculiar init order wrt to hpd handling this is separate. */ |
488 | drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); | 613 | drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); |
489 | } | 614 | } |
490 | 615 | ||
491 | void intel_fbdev_fini(struct drm_device *dev) | 616 | void intel_fbdev_fini(struct drm_device *dev) |
@@ -523,7 +648,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) | |||
523 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 648 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
524 | { | 649 | { |
525 | struct drm_i915_private *dev_priv = dev->dev_private; | 650 | struct drm_i915_private *dev_priv = dev->dev_private; |
526 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | 651 | if (dev_priv->fbdev) |
652 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | ||
527 | } | 653 | } |
528 | 654 | ||
529 | void intel_fbdev_restore_mode(struct drm_device *dev) | 655 | void intel_fbdev_restore_mode(struct drm_device *dev) |
@@ -531,7 +657,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev) | |||
531 | int ret; | 657 | int ret; |
532 | struct drm_i915_private *dev_priv = dev->dev_private; | 658 | struct drm_i915_private *dev_priv = dev->dev_private; |
533 | 659 | ||
534 | if (INTEL_INFO(dev)->num_pipes == 0) | 660 | if (!dev_priv->fbdev) |
535 | return; | 661 | return; |
536 | 662 | ||
537 | drm_modeset_lock_all(dev); | 663 | drm_modeset_lock_all(dev); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ceb479733991..b0413e190625 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -848,6 +848,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector, | |||
848 | return MODE_OK; | 848 | return MODE_OK; |
849 | } | 849 | } |
850 | 850 | ||
851 | static bool hdmi_12bpc_possible(struct intel_crtc *crtc) | ||
852 | { | ||
853 | struct drm_device *dev = crtc->base.dev; | ||
854 | struct intel_encoder *encoder; | ||
855 | int count = 0, count_hdmi = 0; | ||
856 | |||
857 | if (!HAS_PCH_SPLIT(dev)) | ||
858 | return false; | ||
859 | |||
860 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | ||
861 | if (encoder->new_crtc != crtc) | ||
862 | continue; | ||
863 | |||
864 | count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; | ||
865 | count++; | ||
866 | } | ||
867 | |||
868 | /* | ||
869 | * HDMI 12bpc affects the clocks, so it's only possible | ||
870 | * when not cloning with other encoder types. | ||
871 | */ | ||
872 | return count_hdmi > 0 && count_hdmi == count; | ||
873 | } | ||
874 | |||
851 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, | 875 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, |
852 | struct intel_crtc_config *pipe_config) | 876 | struct intel_crtc_config *pipe_config) |
853 | { | 877 | { |
@@ -880,7 +904,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
880 | * within limits. | 904 | * within limits. |
881 | */ | 905 | */ |
882 | if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && | 906 | if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && |
883 | clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { | 907 | clock_12bpc <= portclock_limit && |
908 | hdmi_12bpc_possible(encoder->new_crtc)) { | ||
884 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 909 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
885 | desired_bpp = 12*3; | 910 | desired_bpp = 12*3; |
886 | 911 | ||
@@ -1318,7 +1343,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) | |||
1318 | 1343 | ||
1319 | intel_encoder->type = INTEL_OUTPUT_HDMI; | 1344 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
1320 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 1345 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
1321 | intel_encoder->cloneable = false; | 1346 | intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; |
1347 | /* | ||
1348 | * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems | ||
1349 | * to work on real hardware. And since g4x can send infoframes to | ||
1350 | * only one port anyway, nothing is lost by allowing it. | ||
1351 | */ | ||
1352 | if (IS_G4X(dev)) | ||
1353 | intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; | ||
1322 | 1354 | ||
1323 | intel_dig_port->port = port; | 1355 | intel_dig_port->port = port; |
1324 | intel_dig_port->hdmi.hdmi_reg = hdmi_reg; | 1356 | intel_dig_port->hdmi.hdmi_reg = hdmi_reg; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 48293d2cbf41..f1ecf916474a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -963,7 +963,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
963 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 963 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
964 | intel_encoder->type = INTEL_OUTPUT_LVDS; | 964 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
965 | 965 | ||
966 | intel_encoder->cloneable = false; | 966 | intel_encoder->cloneable = 0; |
967 | if (HAS_PCH_SPLIT(dev)) | 967 | if (HAS_PCH_SPLIT(dev)) |
968 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 968 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
969 | else if (IS_GEN4(dev)) | 969 | else if (IS_GEN4(dev)) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f070d5d769ce..6e73125fc782 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2085,7 +2085,7 @@ static void intel_print_wm_latency(struct drm_device *dev, | |||
2085 | } | 2085 | } |
2086 | } | 2086 | } |
2087 | 2087 | ||
2088 | static void intel_setup_wm_latency(struct drm_device *dev) | 2088 | static void ilk_setup_wm_latency(struct drm_device *dev) |
2089 | { | 2089 | { |
2090 | struct drm_i915_private *dev_priv = dev->dev_private; | 2090 | struct drm_i915_private *dev_priv = dev->dev_private; |
2091 | 2091 | ||
@@ -2907,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) | |||
2907 | * the hw runs at the minimal clock before selecting the desired | 2907 | * the hw runs at the minimal clock before selecting the desired |
2908 | * frequency, if the down threshold expires in that window we will not | 2908 | * frequency, if the down threshold expires in that window we will not |
2909 | * receive a down interrupt. */ | 2909 | * receive a down interrupt. */ |
2910 | limits = dev_priv->rps.max_delay << 24; | 2910 | limits = dev_priv->rps.max_freq_softlimit << 24; |
2911 | if (val <= dev_priv->rps.min_delay) | 2911 | if (val <= dev_priv->rps.min_freq_softlimit) |
2912 | limits |= dev_priv->rps.min_delay << 16; | 2912 | limits |= dev_priv->rps.min_freq_softlimit << 16; |
2913 | 2913 | ||
2914 | return limits; | 2914 | return limits; |
2915 | } | 2915 | } |
@@ -2921,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |||
2921 | new_power = dev_priv->rps.power; | 2921 | new_power = dev_priv->rps.power; |
2922 | switch (dev_priv->rps.power) { | 2922 | switch (dev_priv->rps.power) { |
2923 | case LOW_POWER: | 2923 | case LOW_POWER: |
2924 | if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay) | 2924 | if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) |
2925 | new_power = BETWEEN; | 2925 | new_power = BETWEEN; |
2926 | break; | 2926 | break; |
2927 | 2927 | ||
2928 | case BETWEEN: | 2928 | case BETWEEN: |
2929 | if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay) | 2929 | if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) |
2930 | new_power = LOW_POWER; | 2930 | new_power = LOW_POWER; |
2931 | else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay) | 2931 | else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) |
2932 | new_power = HIGH_POWER; | 2932 | new_power = HIGH_POWER; |
2933 | break; | 2933 | break; |
2934 | 2934 | ||
2935 | case HIGH_POWER: | 2935 | case HIGH_POWER: |
2936 | if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay) | 2936 | if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) |
2937 | new_power = BETWEEN; | 2937 | new_power = BETWEEN; |
2938 | break; | 2938 | break; |
2939 | } | 2939 | } |
2940 | /* Max/min bins are special */ | 2940 | /* Max/min bins are special */ |
2941 | if (val == dev_priv->rps.min_delay) | 2941 | if (val == dev_priv->rps.min_freq_softlimit) |
2942 | new_power = LOW_POWER; | 2942 | new_power = LOW_POWER; |
2943 | if (val == dev_priv->rps.max_delay) | 2943 | if (val == dev_priv->rps.max_freq_softlimit) |
2944 | new_power = HIGH_POWER; | 2944 | new_power = HIGH_POWER; |
2945 | if (new_power == dev_priv->rps.power) | 2945 | if (new_power == dev_priv->rps.power) |
2946 | return; | 2946 | return; |
@@ -3014,10 +3014,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3014 | struct drm_i915_private *dev_priv = dev->dev_private; | 3014 | struct drm_i915_private *dev_priv = dev->dev_private; |
3015 | 3015 | ||
3016 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3016 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
3017 | WARN_ON(val > dev_priv->rps.max_delay); | 3017 | WARN_ON(val > dev_priv->rps.max_freq_softlimit); |
3018 | WARN_ON(val < dev_priv->rps.min_delay); | 3018 | WARN_ON(val < dev_priv->rps.min_freq_softlimit); |
3019 | 3019 | ||
3020 | if (val == dev_priv->rps.cur_delay) { | 3020 | if (val == dev_priv->rps.cur_freq) { |
3021 | /* min/max delay may still have been modified so be sure to | 3021 | /* min/max delay may still have been modified so be sure to |
3022 | * write the limits value */ | 3022 | * write the limits value */ |
3023 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | 3023 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
@@ -3045,7 +3045,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3045 | 3045 | ||
3046 | POSTING_READ(GEN6_RPNSWREQ); | 3046 | POSTING_READ(GEN6_RPNSWREQ); |
3047 | 3047 | ||
3048 | dev_priv->rps.cur_delay = val; | 3048 | dev_priv->rps.cur_freq = val; |
3049 | 3049 | ||
3050 | trace_intel_gpu_freq_change(val * 50); | 3050 | trace_intel_gpu_freq_change(val * 50); |
3051 | } | 3051 | } |
@@ -3065,7 +3065,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |||
3065 | * When we are idle. Drop to min voltage state. | 3065 | * When we are idle. Drop to min voltage state. |
3066 | */ | 3066 | */ |
3067 | 3067 | ||
3068 | if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay) | 3068 | if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit) |
3069 | return; | 3069 | return; |
3070 | 3070 | ||
3071 | /* Mask turbo interrupt so that they will not come in between */ | 3071 | /* Mask turbo interrupt so that they will not come in between */ |
@@ -3082,10 +3082,10 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |||
3082 | return; | 3082 | return; |
3083 | } | 3083 | } |
3084 | 3084 | ||
3085 | dev_priv->rps.cur_delay = dev_priv->rps.min_delay; | 3085 | dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit; |
3086 | 3086 | ||
3087 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, | 3087 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, |
3088 | dev_priv->rps.min_delay); | 3088 | dev_priv->rps.min_freq_softlimit); |
3089 | 3089 | ||
3090 | if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) | 3090 | if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) |
3091 | & GENFREQSTATUS) == 0, 5)) | 3091 | & GENFREQSTATUS) == 0, 5)) |
@@ -3099,7 +3099,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |||
3099 | /* Unmask Up interrupts */ | 3099 | /* Unmask Up interrupts */ |
3100 | dev_priv->rps.rp_up_masked = true; | 3100 | dev_priv->rps.rp_up_masked = true; |
3101 | gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD, | 3101 | gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD, |
3102 | dev_priv->rps.min_delay); | 3102 | dev_priv->rps.min_freq_softlimit); |
3103 | } | 3103 | } |
3104 | 3104 | ||
3105 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | 3105 | void gen6_rps_idle(struct drm_i915_private *dev_priv) |
@@ -3111,7 +3111,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
3111 | if (IS_VALLEYVIEW(dev)) | 3111 | if (IS_VALLEYVIEW(dev)) |
3112 | vlv_set_rps_idle(dev_priv); | 3112 | vlv_set_rps_idle(dev_priv); |
3113 | else | 3113 | else |
3114 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3114 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); |
3115 | dev_priv->rps.last_adj = 0; | 3115 | dev_priv->rps.last_adj = 0; |
3116 | } | 3116 | } |
3117 | mutex_unlock(&dev_priv->rps.hw_lock); | 3117 | mutex_unlock(&dev_priv->rps.hw_lock); |
@@ -3124,9 +3124,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv) | |||
3124 | mutex_lock(&dev_priv->rps.hw_lock); | 3124 | mutex_lock(&dev_priv->rps.hw_lock); |
3125 | if (dev_priv->rps.enabled) { | 3125 | if (dev_priv->rps.enabled) { |
3126 | if (IS_VALLEYVIEW(dev)) | 3126 | if (IS_VALLEYVIEW(dev)) |
3127 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3127 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); |
3128 | else | 3128 | else |
3129 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3129 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); |
3130 | dev_priv->rps.last_adj = 0; | 3130 | dev_priv->rps.last_adj = 0; |
3131 | } | 3131 | } |
3132 | mutex_unlock(&dev_priv->rps.hw_lock); | 3132 | mutex_unlock(&dev_priv->rps.hw_lock); |
@@ -3137,20 +3137,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) | |||
3137 | struct drm_i915_private *dev_priv = dev->dev_private; | 3137 | struct drm_i915_private *dev_priv = dev->dev_private; |
3138 | 3138 | ||
3139 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3139 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
3140 | WARN_ON(val > dev_priv->rps.max_delay); | 3140 | WARN_ON(val > dev_priv->rps.max_freq_softlimit); |
3141 | WARN_ON(val < dev_priv->rps.min_delay); | 3141 | WARN_ON(val < dev_priv->rps.min_freq_softlimit); |
3142 | 3142 | ||
3143 | DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", | 3143 | DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", |
3144 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), | 3144 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), |
3145 | dev_priv->rps.cur_delay, | 3145 | dev_priv->rps.cur_freq, |
3146 | vlv_gpu_freq(dev_priv, val), val); | 3146 | vlv_gpu_freq(dev_priv, val), val); |
3147 | 3147 | ||
3148 | if (val == dev_priv->rps.cur_delay) | 3148 | if (val == dev_priv->rps.cur_freq) |
3149 | return; | 3149 | return; |
3150 | 3150 | ||
3151 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); | 3151 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); |
3152 | 3152 | ||
3153 | dev_priv->rps.cur_delay = val; | 3153 | dev_priv->rps.cur_freq = val; |
3154 | 3154 | ||
3155 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); | 3155 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); |
3156 | } | 3156 | } |
@@ -3292,8 +3292,8 @@ static void gen8_enable_rps(struct drm_device *dev) | |||
3292 | 3292 | ||
3293 | /* Docs recommend 900MHz, and 300 MHz respectively */ | 3293 | /* Docs recommend 900MHz, and 300 MHz respectively */ |
3294 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | 3294 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
3295 | dev_priv->rps.max_delay << 24 | | 3295 | dev_priv->rps.max_freq_softlimit << 24 | |
3296 | dev_priv->rps.min_delay << 16); | 3296 | dev_priv->rps.min_freq_softlimit << 16); |
3297 | 3297 | ||
3298 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ | 3298 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ |
3299 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ | 3299 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ |
@@ -3324,9 +3324,9 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
3324 | { | 3324 | { |
3325 | struct drm_i915_private *dev_priv = dev->dev_private; | 3325 | struct drm_i915_private *dev_priv = dev->dev_private; |
3326 | struct intel_ring_buffer *ring; | 3326 | struct intel_ring_buffer *ring; |
3327 | u32 rp_state_cap, hw_max, hw_min; | 3327 | u32 rp_state_cap; |
3328 | u32 gt_perf_status; | 3328 | u32 gt_perf_status; |
3329 | u32 rc6vids, pcu_mbox, rc6_mask = 0; | 3329 | u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; |
3330 | u32 gtfifodbg; | 3330 | u32 gtfifodbg; |
3331 | int rc6_mode; | 3331 | int rc6_mode; |
3332 | int i, ret; | 3332 | int i, ret; |
@@ -3352,20 +3352,23 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
3352 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 3352 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
3353 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 3353 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
3354 | 3354 | ||
3355 | /* In units of 50MHz */ | 3355 | /* All of these values are in units of 50MHz */ |
3356 | dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff; | 3356 | dev_priv->rps.cur_freq = 0; |
3357 | hw_min = (rp_state_cap >> 16) & 0xff; | 3357 | /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */ |
3358 | dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff; | 3358 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; |
3359 | dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff; | 3359 | dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; |
3360 | dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay; | 3360 | dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; |
3361 | dev_priv->rps.cur_delay = 0; | 3361 | /* XXX: only BYT has a special efficient freq */ |
3362 | dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; | ||
3363 | /* hw_max = RP0 until we check for overclocking */ | ||
3364 | dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; | ||
3362 | 3365 | ||
3363 | /* Preserve min/max settings in case of re-init */ | 3366 | /* Preserve min/max settings in case of re-init */ |
3364 | if (dev_priv->rps.max_delay == 0) | 3367 | if (dev_priv->rps.max_freq_softlimit == 0) |
3365 | dev_priv->rps.max_delay = hw_max; | 3368 | dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; |
3366 | 3369 | ||
3367 | if (dev_priv->rps.min_delay == 0) | 3370 | if (dev_priv->rps.min_freq_softlimit == 0) |
3368 | dev_priv->rps.min_delay = hw_min; | 3371 | dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; |
3369 | 3372 | ||
3370 | /* disable the counters and set deterministic thresholds */ | 3373 | /* disable the counters and set deterministic thresholds */ |
3371 | I915_WRITE(GEN6_RC_CONTROL, 0); | 3374 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -3414,21 +3417,19 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
3414 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | 3417 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); |
3415 | 3418 | ||
3416 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); | 3419 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); |
3417 | if (!ret) { | 3420 | if (ret) |
3418 | pcu_mbox = 0; | ||
3419 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); | ||
3420 | if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ | ||
3421 | DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", | ||
3422 | (dev_priv->rps.max_delay & 0xff) * 50, | ||
3423 | (pcu_mbox & 0xff) * 50); | ||
3424 | dev_priv->rps.hw_max = pcu_mbox & 0xff; | ||
3425 | } | ||
3426 | } else { | ||
3427 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); | 3421 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); |
3422 | |||
3423 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); | ||
3424 | if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ | ||
3425 | DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", | ||
3426 | (dev_priv->rps.max_freq_softlimit & 0xff) * 50, | ||
3427 | (pcu_mbox & 0xff) * 50); | ||
3428 | dev_priv->rps.max_freq = pcu_mbox & 0xff; | ||
3428 | } | 3429 | } |
3429 | 3430 | ||
3430 | dev_priv->rps.power = HIGH_POWER; /* force a reset */ | 3431 | dev_priv->rps.power = HIGH_POWER; /* force a reset */ |
3431 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3432 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); |
3432 | 3433 | ||
3433 | gen6_enable_rps_interrupts(dev); | 3434 | gen6_enable_rps_interrupts(dev); |
3434 | 3435 | ||
@@ -3484,9 +3485,9 @@ void gen6_update_ring_freq(struct drm_device *dev) | |||
3484 | * to use for memory access. We do this by specifying the IA frequency | 3485 | * to use for memory access. We do this by specifying the IA frequency |
3485 | * the PCU should use as a reference to determine the ring frequency. | 3486 | * the PCU should use as a reference to determine the ring frequency. |
3486 | */ | 3487 | */ |
3487 | for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; | 3488 | for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit; |
3488 | gpu_freq--) { | 3489 | gpu_freq--) { |
3489 | int diff = dev_priv->rps.max_delay - gpu_freq; | 3490 | int diff = dev_priv->rps.max_freq_softlimit - gpu_freq; |
3490 | unsigned int ia_freq = 0, ring_freq = 0; | 3491 | unsigned int ia_freq = 0, ring_freq = 0; |
3491 | 3492 | ||
3492 | if (INTEL_INFO(dev)->gen >= 8) { | 3493 | if (INTEL_INFO(dev)->gen >= 8) { |
@@ -3597,7 +3598,7 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
3597 | { | 3598 | { |
3598 | struct drm_i915_private *dev_priv = dev->dev_private; | 3599 | struct drm_i915_private *dev_priv = dev->dev_private; |
3599 | struct intel_ring_buffer *ring; | 3600 | struct intel_ring_buffer *ring; |
3600 | u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0; | 3601 | u32 gtfifodbg, val, rc6_mode = 0; |
3601 | int i; | 3602 | int i; |
3602 | 3603 | ||
3603 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3604 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
@@ -3652,38 +3653,39 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
3652 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); | 3653 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); |
3653 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | 3654 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); |
3654 | 3655 | ||
3655 | dev_priv->rps.cur_delay = (val >> 8) & 0xff; | 3656 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; |
3656 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", | 3657 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", |
3657 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), | 3658 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), |
3658 | dev_priv->rps.cur_delay); | 3659 | dev_priv->rps.cur_freq); |
3659 | 3660 | ||
3660 | dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv); | 3661 | dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); |
3662 | dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; | ||
3661 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", | 3663 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", |
3662 | vlv_gpu_freq(dev_priv, hw_max), | 3664 | vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), |
3663 | hw_max); | 3665 | dev_priv->rps.max_freq); |
3664 | 3666 | ||
3665 | dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); | 3667 | dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); |
3666 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", | 3668 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", |
3667 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), | 3669 | vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), |
3668 | dev_priv->rps.rpe_delay); | 3670 | dev_priv->rps.efficient_freq); |
3669 | 3671 | ||
3670 | hw_min = valleyview_rps_min_freq(dev_priv); | 3672 | dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); |
3671 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", | 3673 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", |
3672 | vlv_gpu_freq(dev_priv, hw_min), | 3674 | vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), |
3673 | hw_min); | 3675 | dev_priv->rps.min_freq); |
3674 | 3676 | ||
3675 | /* Preserve min/max settings in case of re-init */ | 3677 | /* Preserve min/max settings in case of re-init */ |
3676 | if (dev_priv->rps.max_delay == 0) | 3678 | if (dev_priv->rps.max_freq_softlimit == 0) |
3677 | dev_priv->rps.max_delay = hw_max; | 3679 | dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; |
3678 | 3680 | ||
3679 | if (dev_priv->rps.min_delay == 0) | 3681 | if (dev_priv->rps.min_freq_softlimit == 0) |
3680 | dev_priv->rps.min_delay = hw_min; | 3682 | dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; |
3681 | 3683 | ||
3682 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", | 3684 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", |
3683 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), | 3685 | vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), |
3684 | dev_priv->rps.rpe_delay); | 3686 | dev_priv->rps.efficient_freq); |
3685 | 3687 | ||
3686 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); | 3688 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); |
3687 | 3689 | ||
3688 | dev_priv->rps.rp_up_masked = false; | 3690 | dev_priv->rps.rp_up_masked = false; |
3689 | dev_priv->rps.rp_down_masked = false; | 3691 | dev_priv->rps.rp_down_masked = false; |
@@ -4124,7 +4126,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) | |||
4124 | 4126 | ||
4125 | assert_spin_locked(&mchdev_lock); | 4127 | assert_spin_locked(&mchdev_lock); |
4126 | 4128 | ||
4127 | pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); | 4129 | pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); |
4128 | pxvid = (pxvid >> 24) & 0x7f; | 4130 | pxvid = (pxvid >> 24) & 0x7f; |
4129 | ext_v = pvid_to_extvid(dev_priv, pxvid); | 4131 | ext_v = pvid_to_extvid(dev_priv, pxvid); |
4130 | 4132 | ||
@@ -5345,8 +5347,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, | |||
5345 | bool is_enabled, enable_requested; | 5347 | bool is_enabled, enable_requested; |
5346 | uint32_t tmp; | 5348 | uint32_t tmp; |
5347 | 5349 | ||
5348 | WARN_ON(dev_priv->pc8.enabled); | ||
5349 | |||
5350 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); | 5350 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
5351 | is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; | 5351 | is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; |
5352 | enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; | 5352 | enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; |
@@ -5391,7 +5391,6 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, | |||
5391 | static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | 5391 | static void hsw_power_well_enable(struct drm_i915_private *dev_priv, |
5392 | struct i915_power_well *power_well) | 5392 | struct i915_power_well *power_well) |
5393 | { | 5393 | { |
5394 | hsw_disable_package_c8(dev_priv); | ||
5395 | hsw_set_power_well(dev_priv, power_well, true); | 5394 | hsw_set_power_well(dev_priv, power_well, true); |
5396 | } | 5395 | } |
5397 | 5396 | ||
@@ -5399,7 +5398,6 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, | |||
5399 | struct i915_power_well *power_well) | 5398 | struct i915_power_well *power_well) |
5400 | { | 5399 | { |
5401 | hsw_set_power_well(dev_priv, power_well, false); | 5400 | hsw_set_power_well(dev_priv, power_well, false); |
5402 | hsw_enable_package_c8(dev_priv); | ||
5403 | } | 5401 | } |
5404 | 5402 | ||
5405 | static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, | 5403 | static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, |
@@ -5577,6 +5575,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, | |||
5577 | struct i915_power_well *power_well; | 5575 | struct i915_power_well *power_well; |
5578 | int i; | 5576 | int i; |
5579 | 5577 | ||
5578 | intel_runtime_pm_get(dev_priv); | ||
5579 | |||
5580 | power_domains = &dev_priv->power_domains; | 5580 | power_domains = &dev_priv->power_domains; |
5581 | 5581 | ||
5582 | mutex_lock(&power_domains->lock); | 5582 | mutex_lock(&power_domains->lock); |
@@ -5621,6 +5621,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
5621 | } | 5621 | } |
5622 | 5622 | ||
5623 | mutex_unlock(&power_domains->lock); | 5623 | mutex_unlock(&power_domains->lock); |
5624 | |||
5625 | intel_runtime_pm_put(dev_priv); | ||
5624 | } | 5626 | } |
5625 | 5627 | ||
5626 | static struct i915_power_domains *hsw_pwr; | 5628 | static struct i915_power_domains *hsw_pwr; |
@@ -5884,15 +5886,14 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) | |||
5884 | intel_power_domains_resume(dev_priv); | 5886 | intel_power_domains_resume(dev_priv); |
5885 | } | 5887 | } |
5886 | 5888 | ||
5887 | /* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */ | ||
5888 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) | 5889 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) |
5889 | { | 5890 | { |
5890 | hsw_disable_package_c8(dev_priv); | 5891 | intel_runtime_pm_get(dev_priv); |
5891 | } | 5892 | } |
5892 | 5893 | ||
5893 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) | 5894 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) |
5894 | { | 5895 | { |
5895 | hsw_enable_package_c8(dev_priv); | 5896 | intel_runtime_pm_put(dev_priv); |
5896 | } | 5897 | } |
5897 | 5898 | ||
5898 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv) | 5899 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv) |
@@ -5924,8 +5925,6 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv) | |||
5924 | struct drm_device *dev = dev_priv->dev; | 5925 | struct drm_device *dev = dev_priv->dev; |
5925 | struct device *device = &dev->pdev->dev; | 5926 | struct device *device = &dev->pdev->dev; |
5926 | 5927 | ||
5927 | dev_priv->pm.suspended = false; | ||
5928 | |||
5929 | if (!HAS_RUNTIME_PM(dev)) | 5928 | if (!HAS_RUNTIME_PM(dev)) |
5930 | return; | 5929 | return; |
5931 | 5930 | ||
@@ -5934,6 +5933,8 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv) | |||
5934 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ | 5933 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ |
5935 | pm_runtime_mark_last_busy(device); | 5934 | pm_runtime_mark_last_busy(device); |
5936 | pm_runtime_use_autosuspend(device); | 5935 | pm_runtime_use_autosuspend(device); |
5936 | |||
5937 | pm_runtime_put_autosuspend(device); | ||
5937 | } | 5938 | } |
5938 | 5939 | ||
5939 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) | 5940 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) |
@@ -5985,7 +5986,7 @@ void intel_init_pm(struct drm_device *dev) | |||
5985 | 5986 | ||
5986 | /* For FIFO watermark updates */ | 5987 | /* For FIFO watermark updates */ |
5987 | if (HAS_PCH_SPLIT(dev)) { | 5988 | if (HAS_PCH_SPLIT(dev)) { |
5988 | intel_setup_wm_latency(dev); | 5989 | ilk_setup_wm_latency(dev); |
5989 | 5990 | ||
5990 | if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && | 5991 | if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && |
5991 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || | 5992 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || |
@@ -6156,12 +6157,9 @@ void intel_pm_setup(struct drm_device *dev) | |||
6156 | 6157 | ||
6157 | mutex_init(&dev_priv->rps.hw_lock); | 6158 | mutex_init(&dev_priv->rps.hw_lock); |
6158 | 6159 | ||
6159 | mutex_init(&dev_priv->pc8.lock); | ||
6160 | dev_priv->pc8.requirements_met = false; | ||
6161 | dev_priv->pc8.irqs_disabled = false; | ||
6162 | dev_priv->pc8.enabled = false; | ||
6163 | dev_priv->pc8.disable_count = 1; /* requirements_met */ | ||
6164 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); | ||
6165 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 6160 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
6166 | intel_gen6_powersave_work); | 6161 | intel_gen6_powersave_work); |
6162 | |||
6163 | dev_priv->pm.suspended = false; | ||
6164 | dev_priv->pm.irqs_disabled = false; | ||
6167 | } | 6165 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 859092130c18..4eb3e062b4e3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -440,15 +440,17 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
440 | 440 | ||
441 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | 441 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
442 | 442 | ||
443 | if (I915_NEED_GFX_HWS(dev)) | ||
444 | intel_ring_setup_status_page(ring); | ||
445 | else | ||
446 | ring_setup_phys_status_page(ring); | ||
447 | |||
448 | /* Stop the ring if it's running. */ | 443 | /* Stop the ring if it's running. */ |
449 | I915_WRITE_CTL(ring, 0); | 444 | I915_WRITE_CTL(ring, 0); |
450 | I915_WRITE_HEAD(ring, 0); | 445 | I915_WRITE_HEAD(ring, 0); |
451 | ring->write_tail(ring, 0); | 446 | ring->write_tail(ring, 0); |
447 | if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) | ||
448 | DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); | ||
449 | |||
450 | if (I915_NEED_GFX_HWS(dev)) | ||
451 | intel_ring_setup_status_page(ring); | ||
452 | else | ||
453 | ring_setup_phys_status_page(ring); | ||
452 | 454 | ||
453 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 455 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
454 | 456 | ||
@@ -979,9 +981,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
979 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | 981 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); |
980 | POSTING_READ(mmio); | 982 | POSTING_READ(mmio); |
981 | 983 | ||
982 | /* Flush the TLB for this page */ | 984 | /* |
983 | if (INTEL_INFO(dev)->gen >= 6) { | 985 | * Flush the TLB for this page |
986 | * | ||
987 | * FIXME: These two bits have disappeared on gen8, so a question | ||
988 | * arises: do we still need this and if so how should we go about | ||
989 | * invalidating the TLB? | ||
990 | */ | ||
991 | if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { | ||
984 | u32 reg = RING_INSTPM(ring->mmio_base); | 992 | u32 reg = RING_INSTPM(ring->mmio_base); |
993 | |||
994 | /* ring should be idle before issuing a sync flush*/ | ||
995 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); | ||
996 | |||
985 | I915_WRITE(reg, | 997 | I915_WRITE(reg, |
986 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | | 998 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | |
987 | INSTPM_SYNC_FLUSH)); | 999 | INSTPM_SYNC_FLUSH)); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 09af92099c1b..f11ceb230db4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -33,6 +33,8 @@ struct intel_hw_status_page { | |||
33 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) | 33 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) |
34 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | 34 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) |
35 | 35 | ||
36 | #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) | ||
37 | |||
36 | enum intel_ring_hangcheck_action { | 38 | enum intel_ring_hangcheck_action { |
37 | HANGCHECK_IDLE = 0, | 39 | HANGCHECK_IDLE = 0, |
38 | HANGCHECK_WAIT, | 40 | HANGCHECK_WAIT, |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 825853d82a4d..d27155adf5db 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1461,7 +1461,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) | |||
1461 | u32 temp; | 1461 | u32 temp; |
1462 | bool input1, input2; | 1462 | bool input1, input2; |
1463 | int i; | 1463 | int i; |
1464 | u8 status; | 1464 | bool success; |
1465 | 1465 | ||
1466 | temp = I915_READ(intel_sdvo->sdvo_reg); | 1466 | temp = I915_READ(intel_sdvo->sdvo_reg); |
1467 | if ((temp & SDVO_ENABLE) == 0) { | 1467 | if ((temp & SDVO_ENABLE) == 0) { |
@@ -1475,12 +1475,12 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) | |||
1475 | for (i = 0; i < 2; i++) | 1475 | for (i = 0; i < 2; i++) |
1476 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1476 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1477 | 1477 | ||
1478 | status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); | 1478 | success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); |
1479 | /* Warn if the device reported failure to sync. | 1479 | /* Warn if the device reported failure to sync. |
1480 | * A lot of SDVO devices fail to notify of sync, but it's | 1480 | * A lot of SDVO devices fail to notify of sync, but it's |
1481 | * a given it the status is a success, we succeeded. | 1481 | * a given it the status is a success, we succeeded. |
1482 | */ | 1482 | */ |
1483 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { | 1483 | if (success && !input1) { |
1484 | DRM_DEBUG_KMS("First %s output reported failure to " | 1484 | DRM_DEBUG_KMS("First %s output reported failure to " |
1485 | "sync\n", SDVO_NAME(intel_sdvo)); | 1485 | "sync\n", SDVO_NAME(intel_sdvo)); |
1486 | } | 1486 | } |
@@ -3032,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
3032 | * simplistic anyway to express such constraints, so just give up on | 3032 | * simplistic anyway to express such constraints, so just give up on |
3033 | * cloning for SDVO encoders. | 3033 | * cloning for SDVO encoders. |
3034 | */ | 3034 | */ |
3035 | intel_sdvo->base.cloneable = false; | 3035 | intel_sdvo->base.cloneable = 0; |
3036 | 3036 | ||
3037 | intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); | 3037 | intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); |
3038 | 3038 | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index b64fc1c6ff3f..5be4ab218054 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1639,9 +1639,8 @@ intel_tv_init(struct drm_device *dev) | |||
1639 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 1639 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
1640 | intel_encoder->type = INTEL_OUTPUT_TVOUT; | 1640 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1641 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1641 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1642 | intel_encoder->cloneable = false; | 1642 | intel_encoder->cloneable = 0; |
1643 | intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); | 1643 | intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); |
1644 | intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | ||
1645 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; | 1644 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; |
1646 | 1645 | ||
1647 | /* BIOS margin values */ | 1646 | /* BIOS margin values */ |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 7861d97600e1..c3832d9270a6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -280,12 +280,17 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, | |||
280 | 280 | ||
281 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 281 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
282 | 282 | ||
283 | if (fw_engine & FORCEWAKE_RENDER && | 283 | if (fw_engine & FORCEWAKE_RENDER) { |
284 | --dev_priv->uncore.fw_rendercount != 0) | 284 | WARN_ON(!dev_priv->uncore.fw_rendercount); |
285 | fw_engine &= ~FORCEWAKE_RENDER; | 285 | if (--dev_priv->uncore.fw_rendercount != 0) |
286 | if (fw_engine & FORCEWAKE_MEDIA && | 286 | fw_engine &= ~FORCEWAKE_RENDER; |
287 | --dev_priv->uncore.fw_mediacount != 0) | 287 | } |
288 | fw_engine &= ~FORCEWAKE_MEDIA; | 288 | |
289 | if (fw_engine & FORCEWAKE_MEDIA) { | ||
290 | WARN_ON(!dev_priv->uncore.fw_mediacount); | ||
291 | if (--dev_priv->uncore.fw_mediacount != 0) | ||
292 | fw_engine &= ~FORCEWAKE_MEDIA; | ||
293 | } | ||
289 | 294 | ||
290 | if (fw_engine) | 295 | if (fw_engine) |
291 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine); | 296 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine); |
@@ -301,6 +306,8 @@ static void gen6_force_wake_timer(unsigned long arg) | |||
301 | assert_device_not_suspended(dev_priv); | 306 | assert_device_not_suspended(dev_priv); |
302 | 307 | ||
303 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 308 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
309 | WARN_ON(!dev_priv->uncore.forcewake_count); | ||
310 | |||
304 | if (--dev_priv->uncore.forcewake_count == 0) | 311 | if (--dev_priv->uncore.forcewake_count == 0) |
305 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); | 312 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); |
306 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 313 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
@@ -308,9 +315,17 @@ static void gen6_force_wake_timer(unsigned long arg) | |||
308 | intel_runtime_pm_put(dev_priv); | 315 | intel_runtime_pm_put(dev_priv); |
309 | } | 316 | } |
310 | 317 | ||
311 | static void intel_uncore_forcewake_reset(struct drm_device *dev) | 318 | static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) |
312 | { | 319 | { |
313 | struct drm_i915_private *dev_priv = dev->dev_private; | 320 | struct drm_i915_private *dev_priv = dev->dev_private; |
321 | unsigned long irqflags; | ||
322 | |||
323 | del_timer_sync(&dev_priv->uncore.force_wake_timer); | ||
324 | |||
325 | /* Hold uncore.lock across reset to prevent any register access | ||
326 | * with forcewake not set correctly | ||
327 | */ | ||
328 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
314 | 329 | ||
315 | if (IS_VALLEYVIEW(dev)) | 330 | if (IS_VALLEYVIEW(dev)) |
316 | vlv_force_wake_reset(dev_priv); | 331 | vlv_force_wake_reset(dev_priv); |
@@ -319,6 +334,35 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev) | |||
319 | 334 | ||
320 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) | 335 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) |
321 | __gen7_gt_force_wake_mt_reset(dev_priv); | 336 | __gen7_gt_force_wake_mt_reset(dev_priv); |
337 | |||
338 | if (restore) { /* If reset with a user forcewake, try to restore */ | ||
339 | unsigned fw = 0; | ||
340 | |||
341 | if (IS_VALLEYVIEW(dev)) { | ||
342 | if (dev_priv->uncore.fw_rendercount) | ||
343 | fw |= FORCEWAKE_RENDER; | ||
344 | |||
345 | if (dev_priv->uncore.fw_mediacount) | ||
346 | fw |= FORCEWAKE_MEDIA; | ||
347 | } else { | ||
348 | if (dev_priv->uncore.forcewake_count) | ||
349 | fw = FORCEWAKE_ALL; | ||
350 | } | ||
351 | |||
352 | if (fw) | ||
353 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); | ||
354 | |||
355 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
356 | dev_priv->uncore.fifo_count = | ||
357 | __raw_i915_read32(dev_priv, GTFIFOCTL) & | ||
358 | GT_FIFO_FREE_ENTRIES_MASK; | ||
359 | } else { | ||
360 | dev_priv->uncore.forcewake_count = 0; | ||
361 | dev_priv->uncore.fw_rendercount = 0; | ||
362 | dev_priv->uncore.fw_mediacount = 0; | ||
363 | } | ||
364 | |||
365 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
322 | } | 366 | } |
323 | 367 | ||
324 | void intel_uncore_early_sanitize(struct drm_device *dev) | 368 | void intel_uncore_early_sanitize(struct drm_device *dev) |
@@ -344,7 +388,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) | |||
344 | __raw_i915_write32(dev_priv, GTFIFODBG, | 388 | __raw_i915_write32(dev_priv, GTFIFODBG, |
345 | __raw_i915_read32(dev_priv, GTFIFODBG)); | 389 | __raw_i915_read32(dev_priv, GTFIFODBG)); |
346 | 390 | ||
347 | intel_uncore_forcewake_reset(dev); | 391 | intel_uncore_forcewake_reset(dev, false); |
348 | } | 392 | } |
349 | 393 | ||
350 | void intel_uncore_sanitize(struct drm_device *dev) | 394 | void intel_uncore_sanitize(struct drm_device *dev) |
@@ -415,6 +459,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) | |||
415 | 459 | ||
416 | 460 | ||
417 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 461 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
462 | WARN_ON(!dev_priv->uncore.forcewake_count); | ||
463 | |||
418 | if (--dev_priv->uncore.forcewake_count == 0) { | 464 | if (--dev_priv->uncore.forcewake_count == 0) { |
419 | dev_priv->uncore.forcewake_count++; | 465 | dev_priv->uncore.forcewake_count++; |
420 | delayed = true; | 466 | delayed = true; |
@@ -690,6 +736,8 @@ void intel_uncore_init(struct drm_device *dev) | |||
690 | setup_timer(&dev_priv->uncore.force_wake_timer, | 736 | setup_timer(&dev_priv->uncore.force_wake_timer, |
691 | gen6_force_wake_timer, (unsigned long)dev_priv); | 737 | gen6_force_wake_timer, (unsigned long)dev_priv); |
692 | 738 | ||
739 | intel_uncore_early_sanitize(dev); | ||
740 | |||
693 | if (IS_VALLEYVIEW(dev)) { | 741 | if (IS_VALLEYVIEW(dev)) { |
694 | dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; | 742 | dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; |
695 | dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; | 743 | dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; |
@@ -798,13 +846,9 @@ void intel_uncore_init(struct drm_device *dev) | |||
798 | 846 | ||
799 | void intel_uncore_fini(struct drm_device *dev) | 847 | void intel_uncore_fini(struct drm_device *dev) |
800 | { | 848 | { |
801 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
802 | |||
803 | del_timer_sync(&dev_priv->uncore.force_wake_timer); | ||
804 | |||
805 | /* Paranoia: make sure we have disabled everything before we exit. */ | 849 | /* Paranoia: make sure we have disabled everything before we exit. */ |
806 | intel_uncore_sanitize(dev); | 850 | intel_uncore_sanitize(dev); |
807 | intel_uncore_forcewake_reset(dev); | 851 | intel_uncore_forcewake_reset(dev, false); |
808 | } | 852 | } |
809 | 853 | ||
810 | static const struct register_whitelist { | 854 | static const struct register_whitelist { |
@@ -953,13 +997,6 @@ static int gen6_do_reset(struct drm_device *dev) | |||
953 | { | 997 | { |
954 | struct drm_i915_private *dev_priv = dev->dev_private; | 998 | struct drm_i915_private *dev_priv = dev->dev_private; |
955 | int ret; | 999 | int ret; |
956 | unsigned long irqflags; | ||
957 | u32 fw_engine = 0; | ||
958 | |||
959 | /* Hold uncore.lock across reset to prevent any register access | ||
960 | * with forcewake not set correctly | ||
961 | */ | ||
962 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
963 | 1000 | ||
964 | /* Reset the chip */ | 1001 | /* Reset the chip */ |
965 | 1002 | ||
@@ -972,29 +1009,8 @@ static int gen6_do_reset(struct drm_device *dev) | |||
972 | /* Spin waiting for the device to ack the reset request */ | 1009 | /* Spin waiting for the device to ack the reset request */ |
973 | ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); | 1010 | ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); |
974 | 1011 | ||
975 | intel_uncore_forcewake_reset(dev); | 1012 | intel_uncore_forcewake_reset(dev, true); |
976 | 1013 | ||
977 | /* If reset with a user forcewake, try to restore */ | ||
978 | if (IS_VALLEYVIEW(dev)) { | ||
979 | if (dev_priv->uncore.fw_rendercount) | ||
980 | fw_engine |= FORCEWAKE_RENDER; | ||
981 | |||
982 | if (dev_priv->uncore.fw_mediacount) | ||
983 | fw_engine |= FORCEWAKE_MEDIA; | ||
984 | } else { | ||
985 | if (dev_priv->uncore.forcewake_count) | ||
986 | fw_engine = FORCEWAKE_ALL; | ||
987 | } | ||
988 | |||
989 | if (fw_engine) | ||
990 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine); | ||
991 | |||
992 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
993 | dev_priv->uncore.fifo_count = | ||
994 | __raw_i915_read32(dev_priv, GTFIFOCTL) & | ||
995 | GT_FIFO_FREE_ENTRIES_MASK; | ||
996 | |||
997 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
998 | return ret; | 1014 | return ret; |
999 | } | 1015 | } |
1000 | 1016 | ||
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 42947566e755..b4f58914bf7d 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -438,6 +438,9 @@ struct drm_dp_aux_msg { | |||
438 | * The .dev field should be set to a pointer to the device that implements | 438 | * The .dev field should be set to a pointer to the device that implements |
439 | * the AUX channel. | 439 | * the AUX channel. |
440 | * | 440 | * |
441 | * The .name field may be used to specify the name of the I2C adapter. If set to | ||
442 | * NULL, dev_name() of .dev will be used. | ||
443 | * | ||
441 | * Drivers provide a hardware-specific implementation of how transactions | 444 | * Drivers provide a hardware-specific implementation of how transactions |
442 | * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg | 445 | * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg |
443 | * structure describing the transaction is passed into this function. Upon | 446 | * structure describing the transaction is passed into this function. Upon |
@@ -455,6 +458,7 @@ struct drm_dp_aux_msg { | |||
455 | * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter. | 458 | * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter. |
456 | */ | 459 | */ |
457 | struct drm_dp_aux { | 460 | struct drm_dp_aux { |
461 | const char *name; | ||
458 | struct i2c_adapter ddc; | 462 | struct i2c_adapter ddc; |
459 | struct device *dev; | 463 | struct device *dev; |
460 | 464 | ||