author		Dave Airlie <airlied@redhat.com>	2010-10-25 19:23:22 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-10-25 19:23:22 -0400
commit		e3ce8a0b277438591844847ac7c89a980b4cfa6d (patch)
tree		c9bf47675403a54be2e0c54df9357d2b9c65326b /drivers
parent		e1efc9b6ac22c605fd326b3f6af9b393325d43b4 (diff)
parent		641934069d29211baf82afb93622a426172b67b6 (diff)
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (63 commits)
drm/i915: Move gpu_write_list to per-ring
drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
drm/i915/ringbuffer: Write the value passed in to the tail register
agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
drm/i915: Fix flushing regression from 9af90d19f
drm/i915/sdvo: Remove unused encoding member
i915: enable AVI infoframe for intel_hdmi.c [v4]
drm/i915: Fix current fb blocking for page flip
drm/i915: IS_IRONLAKE is synonymous with gen == 5
drm/i915: Enable SandyBridge blitter ring
drm/i915/ringbuffer: Remove broken intel_fill_struct()
drm/i915/ringbuffer: Fix emit batch buffer regression from 8187a2b
drm/i915: Copy the updated reloc->presumed_offset back to the user
drm/i915: Track objects in global active list (as well as per-ring)
drm/i915: Simplify most HAS_BSD() checks
drm/i915: cache the last object lookup during pin_and_relocate()
drm/i915: Do interruptible mutex lock first to avoid locking for unreference
drivers: gpu: drm: i915: Fix a typo.
agp/intel: Also add B43.1 to list of supported devices
drm/i915: rearrange mutex acquisition for pread
...
Diffstat (limited to 'drivers')
26 files changed, 1970 insertions(+), 1365 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 5cd2221ab47..e72f49d5220 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -895,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0c8ff6d8824..6b6760ea243 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1211,13 +1211,13 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 	u32 pte_flags;
 
 	if (type_mask == AGP_USER_UNCACHED_MEMORY)
-		pte_flags = GEN6_PTE_UNCACHED;
+		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
 	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	}
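
The hunk above restores the valid bit dropped by bdd3072: on Sandybridge every GTT entry must carry I810_PTE_VALID in addition to its cacheability bits, otherwise the GPU treats the entry as unmapped regardless of which cache mode was chosen. A minimal standalone model of the flag selection follows; the #define values are placeholders for illustration, not the authoritative hardware encodings.

	#include <stdint.h>

	/* Placeholder encodings, for illustration only. */
	#define I810_PTE_VALID    (1 << 0)	/* entry is present/mapped */
	#define GEN6_PTE_UNCACHED (1 << 1)
	#define GEN6_PTE_LLC      (2 << 1)
	#define GEN6_PTE_LLC_MLC  (3 << 1)
	#define GEN6_PTE_GFDT     (1 << 3)	/* graphics data type hint */

	enum agp_mem_type {
		AGP_USER_UNCACHED_MEMORY,
		AGP_USER_CACHED_MEMORY_LLC_MLC,
		AGP_USER_CACHED_MEMORY,
	};

	/* Mirrors the patched gen6_write_entry() selection: whatever
	 * cache attributes are chosen, the valid bit is always OR'd in. */
	static uint32_t gen6_pte_flags(enum agp_mem_type type, int gfdt)
	{
		uint32_t pte_flags;

		if (type == AGP_USER_UNCACHED_MEMORY)
			pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
		else if (type == AGP_USER_CACHED_MEMORY_LLC_MLC) {
			pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
			if (gfdt)
				pte_flags |= GEN6_PTE_GFDT;
		} else { /* 'normal'/'cached' defaults to LLC/MLC */
			pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
			if (gfdt)
				pte_flags |= GEN6_PTE_GFDT;
		}
		return pte_flags;
	}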
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fd033ebbdf8..c1a26217a53 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1267,34 +1267,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 }
 
 #define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK	0x01
 #define VENDOR_BLOCK    0x03
+#define EDID_BASIC_AUDIO	(1 << 6)
+
 /**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ * Search EDID for CEA extension block.
  */
-bool drm_detect_hdmi_monitor(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
 {
-	char *edid_ext = NULL;
-	int i, hdmi_id;
-	int start_offset, end_offset;
-	bool is_hdmi = false;
+	u8 *edid_ext = NULL;
+	int i;
 
 	/* No EDID or EDID extensions */
 	if (edid == NULL || edid->extensions == 0)
-		goto end;
+		return NULL;
 
 	/* Find CEA extension */
 	for (i = 0; i < edid->extensions; i++) {
-		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-		/* This block is CEA extension */
-		if (edid_ext[0] == 0x02)
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == CEA_EXT)
 			break;
 	}
 
 	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, hdmi_id;
+	int start_offset, end_offset;
+	bool is_hdmi = false;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
 		goto end;
 
 	/* Data block offset in CEA extension block */
@@ -1325,6 +1342,53 @@ end:
 EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
 /**
+ * drm_detect_monitor_audio - check monitor audio capability
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ *
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, j;
+	bool has_audio = false;
+	int start_offset, end_offset;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		goto end;
+
+	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+	if (has_audio) {
+		DRM_DEBUG_KMS("Monitor has basic audio support\n");
+		goto end;
+	}
+
+	/* Data block offset in CEA extension block */
+	start_offset = 4;
+	end_offset = edid_ext[2];
+
+	for (i = start_offset; i < end_offset;
+	     i += ((edid_ext[i] & 0x1f) + 1)) {
+		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+			has_audio = true;
+			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+				DRM_DEBUG_KMS("CEA audio format %d\n",
+					      (edid_ext[i + j] >> 3) & 0xf);
+			goto end;
+		}
+	}
+end:
+	return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
  * drm_add_edid_modes - add modes from EDID data, if available
  * @connector: connector we're probing
  * @edid: edid data
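
For readers unfamiliar with the CEA-861 layout being parsed above: each 128-byte EDID extension block starts with a tag byte (CEA_EXT is 0x02), byte 2 gives the offset where the data blocks end, byte 3 carries feature flags including the basic-audio bit, and each data block header packs a 3-bit tag with a 5-bit payload length. A hedged userspace sketch of the same scan over a raw EDID buffer (not the DRM API itself):

	#include <stdbool.h>
	#include <stdint.h>

	#define EDID_LENGTH      128
	#define CEA_EXT          0x02
	#define AUDIO_BLOCK      0x01
	#define EDID_BASIC_AUDIO (1 << 6)

	/* Standalone re-statement of the kernel logic, operating on a raw
	 * EDID blob of (1 + num_ext) * 128 bytes. */
	static bool edid_has_audio(const uint8_t *edid, int num_ext)
	{
		int i;

		for (i = 1; i <= num_ext; i++) {
			const uint8_t *ext = edid + EDID_LENGTH * i;
			int off;

			if (ext[0] != CEA_EXT)
				continue;
			if (ext[3] & EDID_BASIC_AUDIO)	/* 'basic audio' flag */
				return true;
			/* Walk data blocks: header = tag(3 bits) | length(5 bits) */
			for (off = 4; off < ext[2]; off += (ext[off] & 0x1f) + 1)
				if ((ext[off] >> 5) == AUDIO_BLOCK)
					return true;
		}
		return false;
	}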
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f6e98dd416c..fdc833d5cc7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,6 +35,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
+i915-$(CONFIG_ACPI) += intel_acpi.o
+
 obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d598070fb27..7698983577d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,8 +41,7 @@
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
-	RENDER_LIST,
-	BSD_LIST,
+	ACTIVE_LIST,
 	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
@@ -72,7 +71,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	B(is_pineview);
 	B(is_broadwater);
 	B(is_crestline);
-	B(is_ironlake);
 	B(has_fbc);
 	B(has_rc6);
 	B(has_pipe_cxsr);
@@ -81,6 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	B(has_overlay);
 	B(overlay_needs_physical);
 	B(supports_tv);
+	B(has_bsd_ring);
+	B(has_blt_ring);
 #undef B
 
 	return 0;
@@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
 		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
 }
 
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
@@ -143,13 +145,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		return ret;
 
 	switch (list) {
-	case RENDER_LIST:
-		seq_printf(m, "Render:\n");
-		head = &dev_priv->render_ring.active_list;
-		break;
-	case BSD_LIST:
-		seq_printf(m, "BSD:\n");
-		head = &dev_priv->bsd_ring.active_list;
+	case ACTIVE_LIST:
+		seq_printf(m, "Active:\n");
+		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -173,7 +171,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, list) {
+	list_for_each_entry(obj_priv, head, mm_list) {
 		seq_printf(m, "   ");
 		describe_obj(m, obj_priv);
 		seq_printf(m, "\n");
@@ -460,8 +458,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			    list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			seq_printf(m, "--- gtt_offset = 0x%08x\n",
@@ -797,7 +794,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_IRONLAKE(dev))
+	if (IS_GEN5(dev))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -1020,8 +1017,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
-	{"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
-	{"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
+	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 251987307eb..7a26f4dd21a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -132,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 
 	mutex_lock(&dev->struct_mutex);
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -499,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 	}
 
 
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
 		BEGIN_LP_RING(2);
 		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
 		OUT_RING(MI_NOOP);
@@ -764,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BSD:
 		value = HAS_BSD(dev);
 		break;
+	case I915_PARAM_HAS_BLT:
+		value = HAS_BLT(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
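
The new I915_PARAM_HAS_BLT parameter lets userspace (libdrm, Mesa) discover the Sandybridge blitter ring before submitting to it, just as I915_PARAM_HAS_BSD does for the video ring. A minimal query from userspace might look like the sketch below (error handling trimmed; the device node path is an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <drm/i915_drm.h>

	int main(void)
	{
		drm_i915_getparam_t gp;
		int has_blt = 0;
		int fd = open("/dev/dri/card0", O_RDWR); /* assumed node */

		if (fd < 0)
			return 1;

		gp.param = I915_PARAM_HAS_BLT;
		gp.value = &has_blt;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
			printf("BLT ring: %s\n", has_blt ? "yes" : "no");

		close(fd);
		return 0;
	}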
@@ -1199,9 +1202,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	/* Basic memrange allocator for stolen space (aka mm.vram) */
 	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
 
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
 	/* Let GEM Manage from end of prealloc space to end of aperture.
 	 *
 	 * However, leave one page at the end still bound to the scratch page.
@@ -1235,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	 */
 	dev_priv->allow_batchbuffer = 1;
 
-	ret = intel_init_bios(dev);
+	ret = intel_parse_bios(dev);
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
 
@@ -1244,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	if (ret)
 		goto cleanup_ringbuffer;
 
+	intel_register_dsm_handler();
+
 	ret = vga_switcheroo_register_client(dev->pdev,
 					     i915_switcheroo_set_state,
 					     i915_switcheroo_can_switch);
@@ -1269,6 +1271,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
 		goto cleanup_irq;
 
 	drm_kms_helper_poll_init(dev);
+
+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
+
 	return 0;
 
 cleanup_irq:
@@ -1989,7 +1995,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
@@ -1999,6 +2005,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_setup_gmbus(dev);
 	intel_opregion_setup(dev);
 
+	/* Make sure the bios did its job and set up vital registers */
+	intel_setup_bios(dev);
+
 	i915_gem_load(dev);
 
 	/* Init HWS */
@@ -2010,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	if (IS_PINEVIEW(dev))
 		i915_pineview_get_mem_freq(dev);
-	else if (IS_IRONLAKE(dev))
+	else if (IS_GEN5(dev))
 		i915_ironlake_get_mem_freq(dev);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2063,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->mchdev_lock = &mchdev_lock;
 	spin_unlock(&mchdev_lock);
 
-	/* XXX Prevent module unload due to memory corruption bugs. */
-	__module_get(THIS_MODULE);
-
 	return 0;
 
 out_workqueue_free:
@@ -2134,9 +2140,6 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
-	if (dev_priv->regs != NULL)
-		iounmap(dev_priv->regs);
-
 	intel_opregion_fini(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -2153,8 +2156,14 @@ int i915_driver_unload(struct drm_device *dev)
 		drm_mm_takedown(&dev_priv->mm.vram);
 
 		intel_cleanup_overlay(dev);
+
+		if (!I915_NEED_GFX_HWS(dev))
+			i915_free_hws(dev);
 	}
 
+	if (dev_priv->regs != NULL)
+		iounmap(dev_priv->regs);
+
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c3decb2fef4..8e632110c58 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -143,13 +143,13 @@ static const struct intel_device_info intel_pineview_info = {
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .is_ironlake = 1,
+	.gen = 5,
 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_ironlake = 1, .is_mobile = 1,
+	.gen = 5, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 };
@@ -158,12 +158,14 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 	.gen = 6,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
 	.gen = 6, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 73ad8bff2c2..2c2c19b6285 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -206,7 +206,6 @@ struct intel_device_info {
 	u8 is_pineview : 1;
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
-	u8 is_ironlake : 1;
 	u8 has_fbc : 1;
 	u8 has_rc6 : 1;
 	u8 has_pipe_cxsr : 1;
@@ -216,6 +215,7 @@ struct intel_device_info {
 	u8 overlay_needs_physical : 1;
 	u8 supports_tv : 1;
 	u8 has_bsd_ring : 1;
+	u8 has_blt_ring : 1;
 };
 
 enum no_fbc_reason {
@@ -255,6 +255,7 @@ typedef struct drm_i915_private {
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer bsd_ring;
+	struct intel_ring_buffer blt_ring;
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
@@ -339,17 +340,18 @@ typedef struct drm_i915_private {
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
 	int lvds_ssc_freq;
-
 	struct {
-		u8 rate:4;
-		u8 lanes:4;
-		u8 preemphasis:4;
-		u8 vswing:4;
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
 
-		u8 initialized:1;
-		u8 support:1;
-		u8 bpp:6;
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
 	} edp;
+	bool no_aux_handshake;
 
 	struct notifier_block lid_notifier;
 
@@ -547,6 +549,17 @@ typedef struct drm_i915_private {
 	struct list_head shrink_list;
 
 	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
 	 * List of objects which are not in the ringbuffer but which
 	 * still have a write_domain which needs to be flushed before
 	 * unbinding.
@@ -558,15 +571,6 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
-	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -713,7 +717,8 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *gtt_space;
 
 	/** This object's place on the active/flushing/inactive lists */
-	struct list_head list;
+	struct list_head ring_list;
+	struct list_head mm_list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
 	/** This object's place on eviction list */
@@ -1136,6 +1141,15 @@ static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
 static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1268,7 +1282,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
@@ -1278,6 +1291,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 
 #define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
@@ -1289,8 +1303,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)	 (INTEL_INFO(dev)->has_hotplug)
@@ -1302,9 +1316,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
-			    IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
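
The theme of this header change, and of the IS_IRONLAKE removal throughout the series, is that per-chipset flags give way to a single gen number: "Ironlake" is exactly gen 5, so a dedicated bitfield was redundant state. A toy model of the pattern (not the kernel's actual structures):

	/* Toy model of gen-based feature checks replacing per-chip flags. */
	struct intel_device_info_model {
		unsigned char gen;		/* 2..6 for the chips above */
		unsigned char has_bsd_ring : 1;
		unsigned char has_blt_ring : 1;
	};

	struct dev_model {
		const struct intel_device_info_model *info;
	};

	#define INTEL_INFO(dev)    ((dev)->info)
	#define IS_GEN5(dev)       (INTEL_INFO(dev)->gen == 5)
	#define IS_GEN6(dev)       (INTEL_INFO(dev)->gen == 6)
	#define HAS_BLT(dev)       (INTEL_INFO(dev)->has_blt_ring)
	/* Derived properties compose from gen checks instead of
	 * growing a new flag per chip generation: */
	#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))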
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 100a7537980..6c2618d884e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -244,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(obj);
 	if (ret) {
+		drm_gem_object_release(obj);
+		i915_gem_info_remove_obj(dev->dev_private, obj->size);
+		kfree(obj);
 		return ret;
 	}
 
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference(obj);
+	trace_i915_gem_object_create(obj);
+
 	args->handle = handle;
 	return 0;
 }
@@ -260,19 +265,14 @@ fast_shmem_read(struct page **pages,
 		char __user *data,
 		int length)
 {
-	char __iomem *vaddr;
-	int unwritten;
+	char *vaddr;
+	int ret;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	if (vaddr == NULL)
-		return -ENOMEM;
-	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	if (unwritten)
-		return -EFAULT;
-
-	return 0;
+	return ret;
 }
 
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
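
Two behavioural notes on the reworked helper: kmap_atomic() cannot return NULL (it maps into a fixed per-CPU slot), so the NULL check was dead code, and __copy_to_user_inatomic() returns the number of bytes left uncopied rather than an errno. The helper now just propagates that count and lets the caller translate nonzero into -EFAULT. A standalone model of that contract, with a stand-in copy routine:

	#include <errno.h>
	#include <string.h>

	/* Stand-in for __copy_to_user_inatomic(): returns bytes NOT
	 * copied (0 on full success). This model always succeeds; the
	 * real copy can fault partway through. */
	static unsigned long copy_inatomic_model(void *dst, const void *src,
						 unsigned long len)
	{
		memcpy(dst, src, len);
		return 0;
	}

	/* Caller-side convention after the patch: nonzero => -EFAULT. */
	static int read_page_chunk(void *dst, const void *page, int off, int len)
	{
		if (copy_inatomic_model(dst, (const char *)page + off, len))
			return -EFAULT;
		return 0;
	}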
@@ -366,24 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length;
-	int ret;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret != 0)
-		goto fail_unlock;
-
-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-							args->size);
-	if (ret != 0)
-		goto fail_put_pages;
-
 	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
@@ -400,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		ret = fast_shmem_read(obj_priv->pages,
-				      page_base, page_offset,
-				      user_data, page_length);
-		if (ret)
-			goto fail_put_pages;
+		if (fast_shmem_read(obj_priv->pages,
+				    page_base, page_offset,
+				    user_data, page_length))
+			return -EFAULT;
 
 		remain -= page_length;
 		user_data += page_length;
 		offset += page_length;
 	}
 
-fail_put_pages:
-	i915_gem_object_put_pages(obj);
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return 0;
 }
 
 static int
@@ -477,33 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
+	mutex_unlock(&dev->struct_mutex);
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 				      num_pages, 1, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
-		goto fail_put_user_pages;
+		goto out;
 	}
 
-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto fail_put_user_pages;
-
-	ret = i915_gem_object_get_pages_or_evict(obj);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj,
+							args->offset,
+							args->size);
 	if (ret)
-		goto fail_unlock;
+		goto out;
 
-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-							args->size);
-	if (ret != 0)
-		goto fail_put_pages;
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
 	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
@@ -548,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		offset += page_length;
 	}
 
-fail_put_pages:
-	i915_gem_object_put_pages(obj);
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
 	for (i = 0; i < pinned_pages; i++) {
 		SetPageDirty(user_pages[i]);
 		page_cache_release(user_pages[i]);
@@ -576,9 +547,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL)
-		return -ENOENT;
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
 	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check source.  */
@@ -597,17 +574,35 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (i915_gem_object_needs_bit17_swizzle(obj)) {
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
-	} else {
-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-		if (ret != 0)
-			ret = i915_gem_shmem_pread_slow(dev, obj, args,
-							file_priv);
+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+				       args->size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
 	}
 
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_set_cpu_read_domain_range(obj,
+							args->offset,
+							args->size);
+	if (ret)
+		goto out_put;
+
+	ret = -EFAULT;
+	if (!i915_gem_object_needs_bit17_swizzle(obj))
+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+	if (ret == -EFAULT)
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+
+out_put:
+	i915_gem_object_put_pages(obj);
 out:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_unreference(obj);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
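
The restructured ioctl makes the fast/slow split explicit: prefault the user buffer, seed ret with -EFAULT, attempt the atomic fast path only when no bit-17 swizzling is needed, and fall back to the sleeping slow path whenever the fast path faults. The control flow, reduced to its skeleton (function names hypothetical):

	#include <errno.h>
	#include <stdbool.h>

	/* Hypothetical stand-ins for the two copy strategies. */
	static int pread_fast(void *ctx) { (void)ctx; return -EFAULT; }
	static int pread_slow(void *ctx) { (void)ctx; return 0; }

	/* -EFAULT is the only signal that routes a request from the
	 * atomic fast path to the sleeping slow path. */
	static int do_pread(void *ctx, bool needs_swizzle)
	{
		int ret = -EFAULT;

		if (!needs_swizzle)
			ret = pread_fast(ctx);
		if (ret == -EFAULT)
			ret = pread_slow(ctx);
		return ret;
	}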
@@ -628,9 +623,7 @@ fast_user_write(struct io_mapping *mapping, | |||
628 | unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, | 623 | unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, |
629 | user_data, length); | 624 | user_data, length); |
630 | io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); | 625 | io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); |
631 | if (unwritten) | 626 | return unwritten; |
632 | return -EFAULT; | ||
633 | return 0; | ||
634 | } | 627 | } |
635 | 628 | ||
636 | /* Here's the write path which can sleep for | 629 | /* Here's the write path which can sleep for |
@@ -663,18 +656,14 @@ fast_shmem_write(struct page **pages, | |||
663 | char __user *data, | 656 | char __user *data, |
664 | int length) | 657 | int length) |
665 | { | 658 | { |
666 | char __iomem *vaddr; | 659 | char *vaddr; |
667 | unsigned long unwritten; | 660 | int ret; |
668 | 661 | ||
669 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); | 662 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); |
670 | if (vaddr == NULL) | 663 | ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); |
671 | return -ENOMEM; | ||
672 | unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); | ||
673 | kunmap_atomic(vaddr, KM_USER0); | 664 | kunmap_atomic(vaddr, KM_USER0); |
674 | 665 | ||
675 | if (unwritten) | 666 | return ret; |
676 | return -EFAULT; | ||
677 | return 0; | ||
678 | } | 667 | } |
679 | 668 | ||
680 | /** | 669 | /** |
@@ -692,24 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
692 | loff_t offset, page_base; | 681 | loff_t offset, page_base; |
693 | char __user *user_data; | 682 | char __user *user_data; |
694 | int page_offset, page_length; | 683 | int page_offset, page_length; |
695 | int ret; | ||
696 | 684 | ||
697 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 685 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
698 | remain = args->size; | 686 | remain = args->size; |
699 | 687 | ||
700 | ret = i915_mutex_lock_interruptible(dev); | ||
701 | if (ret) | ||
702 | return ret; | ||
703 | |||
704 | ret = i915_gem_object_pin(obj, 0); | ||
705 | if (ret) { | ||
706 | mutex_unlock(&dev->struct_mutex); | ||
707 | return ret; | ||
708 | } | ||
709 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
710 | if (ret) | ||
711 | goto fail; | ||
712 | |||
713 | obj_priv = to_intel_bo(obj); | 688 | obj_priv = to_intel_bo(obj); |
714 | offset = obj_priv->gtt_offset + args->offset; | 689 | offset = obj_priv->gtt_offset + args->offset; |
715 | 690 | ||
@@ -726,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
726 | if ((page_offset + remain) > PAGE_SIZE) | 701 | if ((page_offset + remain) > PAGE_SIZE) |
727 | page_length = PAGE_SIZE - page_offset; | 702 | page_length = PAGE_SIZE - page_offset; |
728 | 703 | ||
729 | ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, | ||
730 | page_offset, user_data, page_length); | ||
731 | |||
732 | /* If we get a fault while copying data, then (presumably) our | 704 | /* If we get a fault while copying data, then (presumably) our |
733 | * source page isn't available. Return the error and we'll | 705 | * source page isn't available. Return the error and we'll |
734 | * retry in the slow path. | 706 | * retry in the slow path. |
735 | */ | 707 | */ |
736 | if (ret) | 708 | if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, |
737 | goto fail; | 709 | page_offset, user_data, page_length)) |
710 | |||
711 | return -EFAULT; | ||
738 | 712 | ||
739 | remain -= page_length; | 713 | remain -= page_length; |
740 | user_data += page_length; | 714 | user_data += page_length; |
741 | offset += page_length; | 715 | offset += page_length; |
742 | } | 716 | } |
743 | 717 | ||
744 | fail: | 718 | return 0; |
745 | i915_gem_object_unpin(obj); | ||
746 | mutex_unlock(&dev->struct_mutex); | ||
747 | |||
748 | return ret; | ||
749 | } | 719 | } |
750 | 720 | ||
751 | /** | 721 | /** |
@@ -782,30 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
782 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 752 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; |
783 | num_pages = last_data_page - first_data_page + 1; | 753 | num_pages = last_data_page - first_data_page + 1; |
784 | 754 | ||
785 | user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); | 755 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); |
786 | if (user_pages == NULL) | 756 | if (user_pages == NULL) |
787 | return -ENOMEM; | 757 | return -ENOMEM; |
788 | 758 | ||
759 | mutex_unlock(&dev->struct_mutex); | ||
789 | down_read(&mm->mmap_sem); | 760 | down_read(&mm->mmap_sem); |
790 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 761 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
791 | num_pages, 0, 0, user_pages, NULL); | 762 | num_pages, 0, 0, user_pages, NULL); |
792 | up_read(&mm->mmap_sem); | 763 | up_read(&mm->mmap_sem); |
764 | mutex_lock(&dev->struct_mutex); | ||
793 | if (pinned_pages < num_pages) { | 765 | if (pinned_pages < num_pages) { |
794 | ret = -EFAULT; | 766 | ret = -EFAULT; |
795 | goto out_unpin_pages; | 767 | goto out_unpin_pages; |
796 | } | 768 | } |
797 | 769 | ||
798 | ret = i915_mutex_lock_interruptible(dev); | ||
799 | if (ret) | ||
800 | goto out_unpin_pages; | ||
801 | |||
802 | ret = i915_gem_object_pin(obj, 0); | ||
803 | if (ret) | ||
804 | goto out_unlock; | ||
805 | |||
806 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 770 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
807 | if (ret) | 771 | if (ret) |
808 | goto out_unpin_object; | 772 | goto out_unpin_pages; |
809 | 773 | ||
810 | obj_priv = to_intel_bo(obj); | 774 | obj_priv = to_intel_bo(obj); |
811 | offset = obj_priv->gtt_offset + args->offset; | 775 | offset = obj_priv->gtt_offset + args->offset; |
@@ -841,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
841 | data_ptr += page_length; | 805 | data_ptr += page_length; |
842 | } | 806 | } |
843 | 807 | ||
844 | out_unpin_object: | ||
845 | i915_gem_object_unpin(obj); | ||
846 | out_unlock: | ||
847 | mutex_unlock(&dev->struct_mutex); | ||
848 | out_unpin_pages: | 808 | out_unpin_pages: |
849 | for (i = 0; i < pinned_pages; i++) | 809 | for (i = 0; i < pinned_pages; i++) |
850 | page_cache_release(user_pages[i]); | 810 | page_cache_release(user_pages[i]); |
@@ -867,23 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
867 | loff_t offset, page_base; | 827 | loff_t offset, page_base; |
868 | char __user *user_data; | 828 | char __user *user_data; |
869 | int page_offset, page_length; | 829 | int page_offset, page_length; |
870 | int ret; | ||
871 | 830 | ||
872 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 831 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
873 | remain = args->size; | 832 | remain = args->size; |
874 | 833 | ||
875 | ret = i915_mutex_lock_interruptible(dev); | ||
876 | if (ret) | ||
877 | return ret; | ||
878 | |||
879 | ret = i915_gem_object_get_pages(obj, 0); | ||
880 | if (ret != 0) | ||
881 | goto fail_unlock; | ||
882 | |||
883 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
884 | if (ret != 0) | ||
885 | goto fail_put_pages; | ||
886 | |||
887 | obj_priv = to_intel_bo(obj); | 834 | obj_priv = to_intel_bo(obj); |
888 | offset = args->offset; | 835 | offset = args->offset; |
889 | obj_priv->dirty = 1; | 836 | obj_priv->dirty = 1; |
@@ -901,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
901 | if ((page_offset + remain) > PAGE_SIZE) | 848 | if ((page_offset + remain) > PAGE_SIZE) |
902 | page_length = PAGE_SIZE - page_offset; | 849 | page_length = PAGE_SIZE - page_offset; |
903 | 850 | ||
904 | ret = fast_shmem_write(obj_priv->pages, | 851 | if (fast_shmem_write(obj_priv->pages, |
905 | page_base, page_offset, | 852 | page_base, page_offset, |
906 | user_data, page_length); | 853 | user_data, page_length)) |
907 | if (ret) | 854 | return -EFAULT; |
908 | goto fail_put_pages; | ||
909 | 855 | ||
910 | remain -= page_length; | 856 | remain -= page_length; |
911 | user_data += page_length; | 857 | user_data += page_length; |
912 | offset += page_length; | 858 | offset += page_length; |
913 | } | 859 | } |
914 | 860 | ||
915 | fail_put_pages: | 861 | return 0; |
916 | i915_gem_object_put_pages(obj); | ||
917 | fail_unlock: | ||
918 | mutex_unlock(&dev->struct_mutex); | ||
919 | |||
920 | return ret; | ||
921 | } | 862 | } |
922 | 863 | ||
923 | /** | 864 | /** |
@@ -955,32 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
955 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 896 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; |
956 | num_pages = last_data_page - first_data_page + 1; | 897 | num_pages = last_data_page - first_data_page + 1; |
957 | 898 | ||
958 | user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); | 899 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); |
959 | if (user_pages == NULL) | 900 | if (user_pages == NULL) |
960 | return -ENOMEM; | 901 | return -ENOMEM; |
961 | 902 | ||
903 | mutex_unlock(&dev->struct_mutex); | ||
962 | down_read(&mm->mmap_sem); | 904 | down_read(&mm->mmap_sem); |
963 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 905 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
964 | num_pages, 0, 0, user_pages, NULL); | 906 | num_pages, 0, 0, user_pages, NULL); |
965 | up_read(&mm->mmap_sem); | 907 | up_read(&mm->mmap_sem); |
908 | mutex_lock(&dev->struct_mutex); | ||
966 | if (pinned_pages < num_pages) { | 909 | if (pinned_pages < num_pages) { |
967 | ret = -EFAULT; | 910 | ret = -EFAULT; |
968 | goto fail_put_user_pages; | 911 | goto out; |
969 | } | 912 | } |
970 | 913 | ||
971 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 914 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
972 | |||
973 | ret = i915_mutex_lock_interruptible(dev); | ||
974 | if (ret) | ||
975 | goto fail_put_user_pages; | ||
976 | |||
977 | ret = i915_gem_object_get_pages_or_evict(obj); | ||
978 | if (ret) | 915 | if (ret) |
979 | goto fail_unlock; | 916 | goto out; |
980 | 917 | ||
981 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 918 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
982 | if (ret != 0) | ||
983 | goto fail_put_pages; | ||
984 | 919 | ||
985 | obj_priv = to_intel_bo(obj); | 920 | obj_priv = to_intel_bo(obj); |
986 | offset = args->offset; | 921 | offset = args->offset; |
@@ -1026,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
1026 | offset += page_length; | 961 | offset += page_length; |
1027 | } | 962 | } |
1028 | 963 | ||
1029 | fail_put_pages: | 964 | out: |
1030 | i915_gem_object_put_pages(obj); | ||
1031 | fail_unlock: | ||
1032 | mutex_unlock(&dev->struct_mutex); | ||
1033 | fail_put_user_pages: | ||
1034 | for (i = 0; i < pinned_pages; i++) | 965 | for (i = 0; i < pinned_pages; i++) |
1035 | page_cache_release(user_pages[i]); | 966 | page_cache_release(user_pages[i]); |
1036 | drm_free_large(user_pages); | 967 | drm_free_large(user_pages); |
@@ -1045,18 +976,25 @@ fail_put_user_pages: | |||
1045 | */ | 976 | */ |
1046 | int | 977 | int |
1047 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | 978 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
1048 | struct drm_file *file_priv) | 979 | struct drm_file *file) |
1049 | { | 980 | { |
1050 | struct drm_i915_gem_pwrite *args = data; | 981 | struct drm_i915_gem_pwrite *args = data; |
1051 | struct drm_gem_object *obj; | 982 | struct drm_gem_object *obj; |
1052 | struct drm_i915_gem_object *obj_priv; | 983 | struct drm_i915_gem_object *obj_priv; |
1053 | int ret = 0; | 984 | int ret = 0; |
1054 | 985 | ||
1055 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 986 | ret = i915_mutex_lock_interruptible(dev); |
1056 | if (obj == NULL) | 987 | if (ret) |
1057 | return -ENOENT; | 988 | return ret; |
989 | |||
990 | obj = drm_gem_object_lookup(dev, file, args->handle); | ||
991 | if (obj == NULL) { | ||
992 | ret = -ENOENT; | ||
993 | goto unlock; | ||
994 | } | ||
1058 | obj_priv = to_intel_bo(obj); | 995 | obj_priv = to_intel_bo(obj); |
1059 | 996 | ||
997 | |||
1060 | /* Bounds check destination. */ | 998 | /* Bounds check destination. */ |
1061 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 999 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
1062 | ret = -EINVAL; | 1000 | ret = -EINVAL; |
@@ -1073,6 +1011,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1073 | goto out; | 1011 | goto out; |
1074 | } | 1012 | } |
1075 | 1013 | ||
1014 | ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, | ||
1015 | args->size); | ||
1016 | if (ret) { | ||
1017 | ret = -EFAULT; | ||
1018 | goto out; | ||
1019 | } | ||
1020 | |||
1076 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 1021 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
1077 | * it would end up going through the fenced access, and we'll get | 1022 | * it would end up going through the fenced access, and we'll get |
1078 | * different detiling behavior between reading and writing. | 1023 | * different detiling behavior between reading and writing. |
@@ -1080,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1080 | * perspective, requiring manual detiling by the client. | 1025 | * perspective, requiring manual detiling by the client. |
1081 | */ | 1026 | */ |
1082 | if (obj_priv->phys_obj) | 1027 | if (obj_priv->phys_obj) |
1083 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); | 1028 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
1084 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 1029 | else if (obj_priv->tiling_mode == I915_TILING_NONE && |
1085 | obj_priv->gtt_space && | 1030 | obj_priv->gtt_space && |
1086 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | 1031 | obj->write_domain != I915_GEM_DOMAIN_CPU) { |
1087 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); | 1032 | ret = i915_gem_object_pin(obj, 0); |
1088 | if (ret == -EFAULT) { | 1033 | if (ret) |
1089 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, | 1034 | goto out; |
1090 | file_priv); | 1035 | |
1091 | } | 1036 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
1092 | } else if (i915_gem_object_needs_bit17_swizzle(obj)) { | 1037 | if (ret) |
1093 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); | 1038 | goto out_unpin; |
1039 | |||
1040 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | ||
1041 | if (ret == -EFAULT) | ||
1042 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); | ||
1043 | |||
1044 | out_unpin: | ||
1045 | i915_gem_object_unpin(obj); | ||
1094 | } else { | 1046 | } else { |
1095 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); | 1047 | ret = i915_gem_object_get_pages_or_evict(obj); |
1096 | if (ret == -EFAULT) { | 1048 | if (ret) |
1097 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, | 1049 | goto out; |
1098 | file_priv); | ||
1099 | } | ||
1100 | } | ||
1101 | 1050 | ||
1102 | #if WATCH_PWRITE | 1051 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
1103 | if (ret) | 1052 | if (ret) |
1104 | DRM_INFO("pwrite failed %d\n", ret); | 1053 | goto out_put; |
1105 | #endif | 1054 | |
1055 | ret = -EFAULT; | ||
1056 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | ||
1057 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); | ||
1058 | if (ret == -EFAULT) | ||
1059 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); | ||
1060 | |||
1061 | out_put: | ||
1062 | i915_gem_object_put_pages(obj); | ||
1063 | } | ||
1106 | 1064 | ||
1107 | out: | 1065 | out: |
1108 | drm_gem_object_unreference_unlocked(obj); | 1066 | drm_gem_object_unreference(obj); |
1067 | unlock: | ||
1068 | mutex_unlock(&dev->struct_mutex); | ||
1109 | return ret; | 1069 | return ret; |
1110 | } | 1070 | } |
1111 | 1071 | ||
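
The rewritten ioctl takes struct_mutex first, looks the object up under it, prefaults the source range with fault_in_pages_readable(), and unwinds every failure through the shared out/unlock labels, so there is exactly one unreference and one unlock on every path. A compact sketch of that goto-unwind shape, assuming a single coarse lock (pthreads as a stand-in; names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct object { size_t size; };

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct object the_object = { 4096 };

/* stands in for drm_gem_object_lookup(): NULL on a bad handle */
static struct object *lookup(int handle)
{
        return handle == 1 ? &the_object : NULL;
}

/* stands in for drm_gem_object_unreference() */
static void put_object(struct object *obj) { (void)obj; }

static int pwrite_like_ioctl(int handle, size_t offset, size_t len)
{
        struct object *obj;
        int ret = 0;

        pthread_mutex_lock(&dev_lock);          /* lock before lookup */

        obj = lookup(handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* bounds check written so the addition cannot overflow */
        if (offset > obj->size || len > obj->size - offset) {
                ret = -EINVAL;
                goto out;
        }

        /* ... pick and run a write path here ... */

out:
        put_object(obj);                        /* single unreference */
unlock:
        pthread_mutex_unlock(&dev_lock);        /* single unlock */
        return ret;
}
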
@@ -1141,16 +1101,16 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1141 | if (write_domain != 0 && read_domains != write_domain) | 1101 | if (write_domain != 0 && read_domains != write_domain) |
1142 | return -EINVAL; | 1102 | return -EINVAL; |
1143 | 1103 | ||
1144 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1145 | if (obj == NULL) | ||
1146 | return -ENOENT; | ||
1147 | obj_priv = to_intel_bo(obj); | ||
1148 | |||
1149 | ret = i915_mutex_lock_interruptible(dev); | 1104 | ret = i915_mutex_lock_interruptible(dev); |
1150 | if (ret) { | 1105 | if (ret) |
1151 | drm_gem_object_unreference_unlocked(obj); | ||
1152 | return ret; | 1106 | return ret; |
1107 | |||
1108 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1109 | if (obj == NULL) { | ||
1110 | ret = -ENOENT; | ||
1111 | goto unlock; | ||
1153 | } | 1112 | } |
1113 | obj_priv = to_intel_bo(obj); | ||
1154 | 1114 | ||
1155 | intel_mark_busy(dev, obj); | 1115 | intel_mark_busy(dev, obj); |
1156 | 1116 | ||
@@ -1179,9 +1139,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1179 | 1139 | ||
1180 | /* Maintain LRU order of "inactive" objects */ | 1140 | /* Maintain LRU order of "inactive" objects */ |
1181 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | 1141 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) |
1182 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1142 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); |
1183 | 1143 | ||
1184 | drm_gem_object_unreference(obj); | 1144 | drm_gem_object_unreference(obj); |
1145 | unlock: | ||
1185 | mutex_unlock(&dev->struct_mutex); | 1146 | mutex_unlock(&dev->struct_mutex); |
1186 | return ret; | 1147 | return ret; |
1187 | } | 1148 | } |
@@ -1200,14 +1161,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1200 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1161 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1201 | return -ENODEV; | 1162 | return -ENODEV; |
1202 | 1163 | ||
1203 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1204 | if (obj == NULL) | ||
1205 | return -ENOENT; | ||
1206 | |||
1207 | ret = i915_mutex_lock_interruptible(dev); | 1164 | ret = i915_mutex_lock_interruptible(dev); |
1208 | if (ret) { | 1165 | if (ret) |
1209 | drm_gem_object_unreference_unlocked(obj); | ||
1210 | return ret; | 1166 | return ret; |
1167 | |||
1168 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1169 | if (obj == NULL) { | ||
1170 | ret = -ENOENT; | ||
1171 | goto unlock; | ||
1211 | } | 1172 | } |
1212 | 1173 | ||
1213 | /* Pinned buffers may be scanout, so flush the cache */ | 1174 | /* Pinned buffers may be scanout, so flush the cache */ |
@@ -1215,6 +1176,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1215 | i915_gem_object_flush_cpu_write_domain(obj); | 1176 | i915_gem_object_flush_cpu_write_domain(obj); |
1216 | 1177 | ||
1217 | drm_gem_object_unreference(obj); | 1178 | drm_gem_object_unreference(obj); |
1179 | unlock: | ||
1218 | mutex_unlock(&dev->struct_mutex); | 1180 | mutex_unlock(&dev->struct_mutex); |
1219 | return ret; | 1181 | return ret; |
1220 | } | 1182 | } |
@@ -1309,7 +1271,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1309 | } | 1271 | } |
1310 | 1272 | ||
1311 | if (i915_gem_object_is_inactive(obj_priv)) | 1273 | if (i915_gem_object_is_inactive(obj_priv)) |
1312 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1274 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); |
1313 | 1275 | ||
1314 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1276 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + |
1315 | page_offset; | 1277 | page_offset; |
@@ -1512,33 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1512 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1474 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1513 | return -ENODEV; | 1475 | return -ENODEV; |
1514 | 1476 | ||
1515 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1516 | if (obj == NULL) | ||
1517 | return -ENOENT; | ||
1518 | |||
1519 | ret = i915_mutex_lock_interruptible(dev); | 1477 | ret = i915_mutex_lock_interruptible(dev); |
1520 | if (ret) { | 1478 | if (ret) |
1521 | drm_gem_object_unreference_unlocked(obj); | ||
1522 | return ret; | 1479 | return ret; |
1523 | } | ||
1524 | 1480 | ||
1481 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1482 | if (obj == NULL) { | ||
1483 | ret = -ENOENT; | ||
1484 | goto unlock; | ||
1485 | } | ||
1525 | obj_priv = to_intel_bo(obj); | 1486 | obj_priv = to_intel_bo(obj); |
1526 | 1487 | ||
1527 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1488 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
1528 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1489 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
1529 | drm_gem_object_unreference(obj); | 1490 | ret = -EINVAL; |
1530 | mutex_unlock(&dev->struct_mutex); | 1491 | goto out; |
1531 | return -EINVAL; | ||
1532 | } | 1492 | } |
1533 | 1493 | ||
1534 | |||
1535 | if (!obj_priv->mmap_offset) { | 1494 | if (!obj_priv->mmap_offset) { |
1536 | ret = i915_gem_create_mmap_offset(obj); | 1495 | ret = i915_gem_create_mmap_offset(obj); |
1537 | if (ret) { | 1496 | if (ret) |
1538 | drm_gem_object_unreference(obj); | 1497 | goto out; |
1539 | mutex_unlock(&dev->struct_mutex); | ||
1540 | return ret; | ||
1541 | } | ||
1542 | } | 1498 | } |
1543 | 1499 | ||
1544 | args->offset = obj_priv->mmap_offset; | 1500 | args->offset = obj_priv->mmap_offset; |
@@ -1549,17 +1505,15 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1549 | */ | 1505 | */ |
1550 | if (!obj_priv->agp_mem) { | 1506 | if (!obj_priv->agp_mem) { |
1551 | ret = i915_gem_object_bind_to_gtt(obj, 0); | 1507 | ret = i915_gem_object_bind_to_gtt(obj, 0); |
1552 | if (ret) { | 1508 | if (ret) |
1553 | drm_gem_object_unreference(obj); | 1509 | goto out; |
1554 | mutex_unlock(&dev->struct_mutex); | ||
1555 | return ret; | ||
1556 | } | ||
1557 | } | 1510 | } |
1558 | 1511 | ||
1512 | out: | ||
1559 | drm_gem_object_unreference(obj); | 1513 | drm_gem_object_unreference(obj); |
1514 | unlock: | ||
1560 | mutex_unlock(&dev->struct_mutex); | 1515 | mutex_unlock(&dev->struct_mutex); |
1561 | 1516 | return ret; | |
1562 | return 0; | ||
1563 | } | 1517 | } |
1564 | 1518 | ||
1565 | static void | 1519 | static void |
@@ -1611,6 +1565,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, | |||
1611 | struct intel_ring_buffer *ring) | 1565 | struct intel_ring_buffer *ring) |
1612 | { | 1566 | { |
1613 | struct drm_device *dev = obj->dev; | 1567 | struct drm_device *dev = obj->dev; |
1568 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1614 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1569 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1615 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); | 1570 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); |
1616 | 1571 | ||
@@ -1624,7 +1579,8 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, | |||
1624 | } | 1579 | } |
1625 | 1580 | ||
1626 | /* Move from whatever list we were on to the tail of execution. */ | 1581 | /* Move from whatever list we were on to the tail of execution. */ |
1627 | list_move_tail(&obj_priv->list, &ring->active_list); | 1582 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); |
1583 | list_move_tail(&obj_priv->ring_list, &ring->active_list); | ||
1628 | obj_priv->last_rendering_seqno = seqno; | 1584 | obj_priv->last_rendering_seqno = seqno; |
1629 | } | 1585 | } |
1630 | 1586 | ||
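
Replacing the single obj_priv->list with mm_list plus ring_list lets an object sit on the global memory-manager list and on its ring's active list at the same time; i915_gem_object_move_to_active() above now updates both tails. A self-contained sketch of that dual intrusive-list membership (hand-rolled helpers, not the kernel's list.h; names are illustrative):

#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

struct gem_object {
        struct list_head mm_list;   /* global active/inactive lists */
        struct list_head ring_list; /* per-ring execution order     */
};

static void move_to_active(struct gem_object *obj,
                           struct list_head *global_active,
                           struct list_head *ring_active)
{
        /* mirrors the two list_move_tail() calls in the hunk above */
        list_del(&obj->mm_list);
        list_add_tail(&obj->mm_list, global_active);
        list_del(&obj->ring_list);
        list_add_tail(&obj->ring_list, ring_active);
}
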
@@ -1636,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1636 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1592 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1637 | 1593 | ||
1638 | BUG_ON(!obj_priv->active); | 1594 | BUG_ON(!obj_priv->active); |
1639 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 1595 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); |
1596 | list_del_init(&obj_priv->ring_list); | ||
1640 | obj_priv->last_rendering_seqno = 0; | 1597 | obj_priv->last_rendering_seqno = 0; |
1641 | } | 1598 | } |
1642 | 1599 | ||
@@ -1675,9 +1632,10 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1675 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1632 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1676 | 1633 | ||
1677 | if (obj_priv->pin_count != 0) | 1634 | if (obj_priv->pin_count != 0) |
1678 | list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); | 1635 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); |
1679 | else | 1636 | else |
1680 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1637 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); |
1638 | list_del_init(&obj_priv->ring_list); | ||
1681 | 1639 | ||
1682 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | 1640 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); |
1683 | 1641 | ||
@@ -1699,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1699 | struct drm_i915_gem_object *obj_priv, *next; | 1657 | struct drm_i915_gem_object *obj_priv, *next; |
1700 | 1658 | ||
1701 | list_for_each_entry_safe(obj_priv, next, | 1659 | list_for_each_entry_safe(obj_priv, next, |
1702 | &dev_priv->mm.gpu_write_list, | 1660 | &ring->gpu_write_list, |
1703 | gpu_write_list) { | 1661 | gpu_write_list) { |
1704 | struct drm_gem_object *obj = &obj_priv->base; | 1662 | struct drm_gem_object *obj = &obj_priv->base; |
1705 | 1663 | ||
1706 | if (obj->write_domain & flush_domains && | 1664 | if (obj->write_domain & flush_domains) { |
1707 | obj_priv->ring == ring) { | ||
1708 | uint32_t old_write_domain = obj->write_domain; | 1665 | uint32_t old_write_domain = obj->write_domain; |
1709 | 1666 | ||
1710 | obj->write_domain = 0; | 1667 | obj->write_domain = 0; |
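
Moving gpu_write_list under the ring removes the per-object ring filter from the walk: everything on ring->gpu_write_list belongs to that ring by construction. The walk still has to tolerate unlinking the current entry, which list_for_each_entry_safe() guarantees by reading the successor first; a plain-C model of that safe-removal iteration (hypothetical names, singly linked for brevity):

#include <stddef.h>

struct node { struct node *next; int write_domain; };

static void process_flushing(struct node **head, int flush_domains)
{
        struct node **link = head;
        struct node *n, *next;

        for (n = *head; n != NULL; n = next) {
                next = n->next;         /* capture successor first */
                if (n->write_domain & flush_domains) {
                        *link = next;   /* unlink the current entry */
                        n->next = NULL;
                        n->write_domain = 0;
                } else {
                        link = &n->next;
                }
        }
}
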
@@ -1826,7 +1783,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
1826 | 1783 | ||
1827 | obj_priv = list_first_entry(&ring->active_list, | 1784 | obj_priv = list_first_entry(&ring->active_list, |
1828 | struct drm_i915_gem_object, | 1785 | struct drm_i915_gem_object, |
1829 | list); | 1786 | ring_list); |
1830 | 1787 | ||
1831 | obj_priv->base.write_domain = 0; | 1788 | obj_priv->base.write_domain = 0; |
1832 | list_del_init(&obj_priv->gpu_write_list); | 1789 | list_del_init(&obj_priv->gpu_write_list); |
@@ -1841,8 +1798,8 @@ void i915_gem_reset(struct drm_device *dev) | |||
1841 | int i; | 1798 | int i; |
1842 | 1799 | ||
1843 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); | 1800 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); |
1844 | if (HAS_BSD(dev)) | 1801 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); |
1845 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); | 1802 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); |
1846 | 1803 | ||
1847 | /* Remove anything from the flushing lists. The GPU cache is likely | 1804 | /* Remove anything from the flushing lists. The GPU cache is likely |
1848 | * to be lost on reset along with the data, so simply move the | 1805 | * to be lost on reset along with the data, so simply move the |
@@ -1851,7 +1808,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
1851 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 1808 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
1852 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 1809 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, |
1853 | struct drm_i915_gem_object, | 1810 | struct drm_i915_gem_object, |
1854 | list); | 1811 | mm_list); |
1855 | 1812 | ||
1856 | obj_priv->base.write_domain = 0; | 1813 | obj_priv->base.write_domain = 0; |
1857 | list_del_init(&obj_priv->gpu_write_list); | 1814 | list_del_init(&obj_priv->gpu_write_list); |
@@ -1863,7 +1820,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
1863 | */ | 1820 | */ |
1864 | list_for_each_entry(obj_priv, | 1821 | list_for_each_entry(obj_priv, |
1865 | &dev_priv->mm.inactive_list, | 1822 | &dev_priv->mm.inactive_list, |
1866 | list) | 1823 | mm_list) |
1867 | { | 1824 | { |
1868 | obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 1825 | obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1869 | } | 1826 | } |
@@ -1923,7 +1880,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1923 | 1880 | ||
1924 | obj_priv = list_first_entry(&ring->active_list, | 1881 | obj_priv = list_first_entry(&ring->active_list, |
1925 | struct drm_i915_gem_object, | 1882 | struct drm_i915_gem_object, |
1926 | list); | 1883 | ring_list); |
1927 | 1884 | ||
1928 | if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) | 1885 | if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) |
1929 | break; | 1886 | break; |
@@ -1959,13 +1916,13 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
1959 | */ | 1916 | */ |
1960 | list_for_each_entry_safe(obj_priv, tmp, | 1917 | list_for_each_entry_safe(obj_priv, tmp, |
1961 | &dev_priv->mm.deferred_free_list, | 1918 | &dev_priv->mm.deferred_free_list, |
1962 | list) | 1919 | mm_list) |
1963 | i915_gem_free_object_tail(&obj_priv->base); | 1920 | i915_gem_free_object_tail(&obj_priv->base); |
1964 | } | 1921 | } |
1965 | 1922 | ||
1966 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 1923 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); |
1967 | if (HAS_BSD(dev)) | 1924 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); |
1968 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); | 1925 | i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); |
1969 | } | 1926 | } |
1970 | 1927 | ||
1971 | static void | 1928 | static void |
@@ -1988,8 +1945,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1988 | 1945 | ||
1989 | if (!dev_priv->mm.suspended && | 1946 | if (!dev_priv->mm.suspended && |
1990 | (!list_empty(&dev_priv->render_ring.request_list) || | 1947 | (!list_empty(&dev_priv->render_ring.request_list) || |
1991 | (HAS_BSD(dev) && | 1948 | !list_empty(&dev_priv->bsd_ring.request_list) || |
1992 | !list_empty(&dev_priv->bsd_ring.request_list)))) | 1949 | !list_empty(&dev_priv->blt_ring.request_list))) |
1993 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1950 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1994 | mutex_unlock(&dev->struct_mutex); | 1951 | mutex_unlock(&dev->struct_mutex); |
1995 | } | 1952 | } |
@@ -2108,6 +2065,10 @@ i915_gem_flush(struct drm_device *dev, | |||
2108 | i915_gem_flush_ring(dev, file_priv, | 2065 | i915_gem_flush_ring(dev, file_priv, |
2109 | &dev_priv->bsd_ring, | 2066 | &dev_priv->bsd_ring, |
2110 | invalidate_domains, flush_domains); | 2067 | invalidate_domains, flush_domains); |
2068 | if (flush_rings & RING_BLT) | ||
2069 | i915_gem_flush_ring(dev, file_priv, | ||
2070 | &dev_priv->blt_ring, | ||
2071 | invalidate_domains, flush_domains); | ||
2111 | } | 2072 | } |
2112 | } | 2073 | } |
2113 | 2074 | ||
@@ -2194,10 +2155,11 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2194 | BUG_ON(obj_priv->pages_refcount); | 2155 | BUG_ON(obj_priv->pages_refcount); |
2195 | 2156 | ||
2196 | i915_gem_info_remove_gtt(dev_priv, obj->size); | 2157 | i915_gem_info_remove_gtt(dev_priv, obj->size); |
2197 | list_del_init(&obj_priv->list); | 2158 | list_del_init(&obj_priv->mm_list); |
2198 | 2159 | ||
2199 | drm_mm_put_block(obj_priv->gtt_space); | 2160 | drm_mm_put_block(obj_priv->gtt_space); |
2200 | obj_priv->gtt_space = NULL; | 2161 | obj_priv->gtt_space = NULL; |
2162 | obj_priv->gtt_offset = 0; | ||
2201 | 2163 | ||
2202 | if (i915_gem_object_is_purgeable(obj_priv)) | 2164 | if (i915_gem_object_is_purgeable(obj_priv)) |
2203 | i915_gem_object_truncate(obj); | 2165 | i915_gem_object_truncate(obj); |
@@ -2210,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2210 | static int i915_ring_idle(struct drm_device *dev, | 2172 | static int i915_ring_idle(struct drm_device *dev, |
2211 | struct intel_ring_buffer *ring) | 2173 | struct intel_ring_buffer *ring) |
2212 | { | 2174 | { |
2175 | if (list_empty(&ring->gpu_write_list)) | ||
2176 | return 0; | ||
2177 | |||
2213 | i915_gem_flush_ring(dev, NULL, ring, | 2178 | i915_gem_flush_ring(dev, NULL, ring, |
2214 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2179 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2215 | return i915_wait_request(dev, | 2180 | return i915_wait_request(dev, |
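
The early return added to i915_ring_idle() makes idling an empty ring free, which is what lets i915_gem_reset(), the retire paths and i915_gpu_idle() in the surrounding hunks drop their HAS_BSD()-style guards and simply touch all three rings unconditionally. A small sketch of the idea (illustrative types):

#include <stdbool.h>

struct ring { bool has_pending_writes; };

/* stands in for flush + wait on a ring's outstanding rendering */
static int flush_and_wait(struct ring *r)
{
        r->has_pending_writes = false;
        return 0;
}

static int ring_idle(struct ring *r)
{
        if (!r->has_pending_writes)     /* nothing queued: free no-op */
                return 0;
        return flush_and_wait(r);
}
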
@@ -2226,8 +2191,8 @@ i915_gpu_idle(struct drm_device *dev) | |||
2226 | 2191 | ||
2227 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 2192 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
2228 | list_empty(&dev_priv->render_ring.active_list) && | 2193 | list_empty(&dev_priv->render_ring.active_list) && |
2229 | (!HAS_BSD(dev) || | 2194 | list_empty(&dev_priv->bsd_ring.active_list) && |
2230 | list_empty(&dev_priv->bsd_ring.active_list))); | 2195 | list_empty(&dev_priv->blt_ring.active_list)); |
2231 | if (lists_empty) | 2196 | if (lists_empty) |
2232 | return 0; | 2197 | return 0; |
2233 | 2198 | ||
@@ -2236,11 +2201,13 @@ i915_gpu_idle(struct drm_device *dev) | |||
2236 | if (ret) | 2201 | if (ret) |
2237 | return ret; | 2202 | return ret; |
2238 | 2203 | ||
2239 | if (HAS_BSD(dev)) { | 2204 | ret = i915_ring_idle(dev, &dev_priv->bsd_ring); |
2240 | ret = i915_ring_idle(dev, &dev_priv->bsd_ring); | 2205 | if (ret) |
2241 | if (ret) | 2206 | return ret; |
2242 | return ret; | 2207 | |
2243 | } | 2208 | ret = i915_ring_idle(dev, &dev_priv->blt_ring); |
2209 | if (ret) | ||
2210 | return ret; | ||
2244 | 2211 | ||
2245 | return 0; | 2212 | return 0; |
2246 | } | 2213 | } |
@@ -2691,12 +2658,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2691 | search_free: | 2658 | search_free: |
2692 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 2659 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, |
2693 | obj->size, alignment, 0); | 2660 | obj->size, alignment, 0); |
2694 | if (free_space != NULL) { | 2661 | if (free_space != NULL) |
2695 | obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, | 2662 | obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, |
2696 | alignment); | 2663 | alignment); |
2697 | if (obj_priv->gtt_space != NULL) | ||
2698 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | ||
2699 | } | ||
2700 | if (obj_priv->gtt_space == NULL) { | 2664 | if (obj_priv->gtt_space == NULL) { |
2701 | /* If the gtt is empty and we're still having trouble | 2665 | /* If the gtt is empty and we're still having trouble |
2702 | * fitting our object in, we're out of memory. | 2666 | * fitting our object in, we're out of memory. |
@@ -2739,7 +2703,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2739 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 2703 | obj_priv->agp_mem = drm_agp_bind_pages(dev, |
2740 | obj_priv->pages, | 2704 | obj_priv->pages, |
2741 | obj->size >> PAGE_SHIFT, | 2705 | obj->size >> PAGE_SHIFT, |
2742 | obj_priv->gtt_offset, | 2706 | obj_priv->gtt_space->start, |
2743 | obj_priv->agp_type); | 2707 | obj_priv->agp_type); |
2744 | if (obj_priv->agp_mem == NULL) { | 2708 | if (obj_priv->agp_mem == NULL) { |
2745 | i915_gem_object_put_pages(obj); | 2709 | i915_gem_object_put_pages(obj); |
@@ -2754,7 +2718,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2754 | } | 2718 | } |
2755 | 2719 | ||
2756 | /* keep track of bound object by adding it to the inactive list */ | 2720 |
2757 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 2721 | list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); |
2758 | i915_gem_info_add_gtt(dev_priv, obj->size); | 2722 | i915_gem_info_add_gtt(dev_priv, obj->size); |
2759 | 2723 | ||
2760 | /* Assert that the object is not currently in any GPU domain. As it | 2724 | /* Assert that the object is not currently in any GPU domain. As it |
@@ -2764,6 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2764 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2728 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); |
2765 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2729 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); |
2766 | 2730 | ||
2731 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | ||
2767 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); | 2732 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); |
2768 | 2733 | ||
2769 | return 0; | 2734 | return 0; |
@@ -3115,7 +3080,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
3115 | * drm_agp_chipset_flush | 3080 | * drm_agp_chipset_flush |
3116 | */ | 3081 | */ |
3117 | static void | 3082 | static void |
3118 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 3083 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, |
3084 | struct intel_ring_buffer *ring) | ||
3119 | { | 3085 | { |
3120 | struct drm_device *dev = obj->dev; | 3086 | struct drm_device *dev = obj->dev; |
3121 | struct drm_i915_private *dev_priv = dev->dev_private; | 3087 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3124,9 +3090,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3124 | uint32_t flush_domains = 0; | 3090 | uint32_t flush_domains = 0; |
3125 | uint32_t old_read_domains; | 3091 | uint32_t old_read_domains; |
3126 | 3092 | ||
3127 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); | ||
3128 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); | ||
3129 | |||
3130 | intel_mark_busy(dev, obj); | 3093 | intel_mark_busy(dev, obj); |
3131 | 3094 | ||
3132 | /* | 3095 | /* |
@@ -3172,8 +3135,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3172 | 3135 | ||
3173 | dev->invalidate_domains |= invalidate_domains; | 3136 | dev->invalidate_domains |= invalidate_domains; |
3174 | dev->flush_domains |= flush_domains; | 3137 | dev->flush_domains |= flush_domains; |
3175 | if (obj_priv->ring) | 3138 | if (flush_domains & I915_GEM_GPU_DOMAINS) |
3176 | dev_priv->mm.flush_rings |= obj_priv->ring->id; | 3139 | dev_priv->mm.flush_rings |= obj_priv->ring->id; |
3140 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | ||
3141 | dev_priv->mm.flush_rings |= ring->id; | ||
3177 | 3142 | ||
3178 | trace_i915_gem_object_change_domain(obj, | 3143 | trace_i915_gem_object_change_domain(obj, |
3179 | old_read_domains, | 3144 | old_read_domains, |
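
With a third ring the flush bookkeeping can no longer rely on obj_priv->ring alone: a flushed write is charged to the ring the object last executed on, while an invalidate is charged to the ring the new batch will run on. A sketch of that accumulation into a single rings bitmask (the domain mask and id values here are illustrative, not the real register encodings):

#include <stdint.h>

#define GPU_DOMAINS 0x3eu       /* illustrative: the non-CPU domain bits */

struct ring { uint32_t id; };   /* id is a one-bit ring mask */

static uint32_t update_flush_rings(uint32_t flush_rings,
                                   uint32_t flush_domains,
                                   uint32_t invalidate_domains,
                                   const struct ring *last_ring,
                                   const struct ring *target_ring)
{
        /* writes being flushed: charge the ring that produced them */
        if ((flush_domains & GPU_DOMAINS) && last_ring)
                flush_rings |= last_ring->id;
        /* caches being invalidated: charge the ring about to read */
        if (invalidate_domains & GPU_DOMAINS)
                flush_rings |= target_ring->id;
        return flush_rings;
}
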
@@ -3289,68 +3254,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3289 | * Pin an object to the GTT and evaluate the relocations landing in it. | 3254 | * Pin an object to the GTT and evaluate the relocations landing in it. |
3290 | */ | 3255 | */ |
3291 | static int | 3256 | static int |
3292 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 3257 | i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, |
3293 | struct drm_file *file_priv, | 3258 | struct drm_file *file_priv, |
3294 | struct drm_i915_gem_exec_object2 *entry, | 3259 | struct drm_i915_gem_exec_object2 *entry) |
3295 | struct drm_i915_gem_relocation_entry *relocs) | ||
3296 | { | 3260 | { |
3297 | struct drm_device *dev = obj->dev; | 3261 | struct drm_device *dev = obj->base.dev; |
3298 | drm_i915_private_t *dev_priv = dev->dev_private; | 3262 | drm_i915_private_t *dev_priv = dev->dev_private; |
3299 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3263 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3300 | int i, ret; | 3264 | struct drm_gem_object *target_obj = NULL; |
3301 | void __iomem *reloc_page; | 3265 | uint32_t target_handle = 0; |
3302 | bool need_fence; | 3266 | int i, ret = 0; |
3303 | |||
3304 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3305 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3306 | |||
3307 | /* Check fence reg constraints and rebind if necessary */ | ||
3308 | if (need_fence && | ||
3309 | !i915_gem_object_fence_offset_ok(obj, | ||
3310 | obj_priv->tiling_mode)) { | ||
3311 | ret = i915_gem_object_unbind(obj); | ||
3312 | if (ret) | ||
3313 | return ret; | ||
3314 | } | ||
3315 | 3267 | ||
3316 | /* Choose the GTT offset for our buffer and put it there. */ | 3268 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; |
3317 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 3269 | for (i = 0; i < entry->relocation_count; i++) { |
3318 | if (ret) | 3270 | struct drm_i915_gem_relocation_entry reloc; |
3319 | return ret; | 3271 | uint32_t target_offset; |
3320 | 3272 | ||
3321 | /* | 3273 | if (__copy_from_user_inatomic(&reloc, |
3322 | * Pre-965 chips need a fence register set up in order to | 3274 | user_relocs+i, |
3323 | * properly handle blits to/from tiled surfaces. | 3275 | sizeof(reloc))) { |
3324 | */ | 3276 | ret = -EFAULT; |
3325 | if (need_fence) { | 3277 | break; |
3326 | ret = i915_gem_object_get_fence_reg(obj, true); | ||
3327 | if (ret != 0) { | ||
3328 | i915_gem_object_unpin(obj); | ||
3329 | return ret; | ||
3330 | } | 3278 | } |
3331 | 3279 | ||
3332 | dev_priv->fence_regs[obj_priv->fence_reg].gpu = true; | 3280 | if (reloc.target_handle != target_handle) { |
3333 | } | 3281 | drm_gem_object_unreference(target_obj); |
3334 | 3282 | ||
3335 | entry->offset = obj_priv->gtt_offset; | 3283 | target_obj = drm_gem_object_lookup(dev, file_priv, |
3284 | reloc.target_handle); | ||
3285 | if (target_obj == NULL) { | ||
3286 | ret = -ENOENT; | ||
3287 | break; | ||
3288 | } | ||
3336 | 3289 | ||
3337 | /* Apply the relocations, using the GTT aperture to avoid cache | 3290 | target_handle = reloc.target_handle; |
3338 | * flushing requirements. | ||
3339 | */ | ||
3340 | for (i = 0; i < entry->relocation_count; i++) { | ||
3341 | struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; | ||
3342 | struct drm_gem_object *target_obj; | ||
3343 | struct drm_i915_gem_object *target_obj_priv; | ||
3344 | uint32_t reloc_val, reloc_offset; | ||
3345 | uint32_t __iomem *reloc_entry; | ||
3346 | |||
3347 | target_obj = drm_gem_object_lookup(obj->dev, file_priv, | ||
3348 | reloc->target_handle); | ||
3349 | if (target_obj == NULL) { | ||
3350 | i915_gem_object_unpin(obj); | ||
3351 | return -ENOENT; | ||
3352 | } | 3291 | } |
3353 | target_obj_priv = to_intel_bo(target_obj); | 3292 | target_offset = to_intel_bo(target_obj)->gtt_offset; |
3354 | 3293 | ||
3355 | #if WATCH_RELOC | 3294 | #if WATCH_RELOC |
3356 | DRM_INFO("%s: obj %p offset %08x target %d " | 3295 | DRM_INFO("%s: obj %p offset %08x target %d " |
@@ -3358,136 +3297,202 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3358 | "presumed %08x delta %08x\n", | 3297 | "presumed %08x delta %08x\n", |
3359 | __func__, | 3298 | __func__, |
3360 | obj, | 3299 | obj, |
3361 | (int) reloc->offset, | 3300 | (int) reloc.offset, |
3362 | (int) reloc->target_handle, | 3301 | (int) reloc.target_handle, |
3363 | (int) reloc->read_domains, | 3302 | (int) reloc.read_domains, |
3364 | (int) reloc->write_domain, | 3303 | (int) reloc.write_domain, |
3365 | (int) target_obj_priv->gtt_offset, | 3304 | (int) target_offset, |
3366 | (int) reloc->presumed_offset, | 3305 | (int) reloc.presumed_offset, |
3367 | reloc->delta); | 3306 | reloc.delta); |
3368 | #endif | 3307 | #endif |
3369 | 3308 | ||
3370 | /* The target buffer should have appeared before us in the | 3309 | /* The target buffer should have appeared before us in the |
3371 | * exec_object list, so it should have a GTT space bound by now. | 3310 | * exec_object list, so it should have a GTT space bound by now. |
3372 | */ | 3311 | */ |
3373 | if (target_obj_priv->gtt_space == NULL) { | 3312 | if (target_offset == 0) { |
3374 | DRM_ERROR("No GTT space found for object %d\n", | 3313 | DRM_ERROR("No GTT space found for object %d\n", |
3375 | reloc->target_handle); | 3314 | reloc.target_handle); |
3376 | drm_gem_object_unreference(target_obj); | 3315 | ret = -EINVAL; |
3377 | i915_gem_object_unpin(obj); | 3316 | break; |
3378 | return -EINVAL; | ||
3379 | } | 3317 | } |
3380 | 3318 | ||
3381 | /* Validate that the target is in a valid r/w GPU domain */ | 3319 | /* Validate that the target is in a valid r/w GPU domain */ |
3382 | if (reloc->write_domain & (reloc->write_domain - 1)) { | 3320 | if (reloc.write_domain & (reloc.write_domain - 1)) { |
3383 | DRM_ERROR("reloc with multiple write domains: " | 3321 | DRM_ERROR("reloc with multiple write domains: " |
3384 | "obj %p target %d offset %d " | 3322 | "obj %p target %d offset %d " |
3385 | "read %08x write %08x", | 3323 | "read %08x write %08x", |
3386 | obj, reloc->target_handle, | 3324 | obj, reloc.target_handle, |
3387 | (int) reloc->offset, | 3325 | (int) reloc.offset, |
3388 | reloc->read_domains, | 3326 | reloc.read_domains, |
3389 | reloc->write_domain); | 3327 | reloc.write_domain); |
3390 | drm_gem_object_unreference(target_obj); | 3328 | ret = -EINVAL; |
3391 | i915_gem_object_unpin(obj); | 3329 | break; |
3392 | return -EINVAL; | ||
3393 | } | 3330 | } |
3394 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3331 | if (reloc.write_domain & I915_GEM_DOMAIN_CPU || |
3395 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { | 3332 | reloc.read_domains & I915_GEM_DOMAIN_CPU) { |
3396 | DRM_ERROR("reloc with read/write CPU domains: " | 3333 | DRM_ERROR("reloc with read/write CPU domains: " |
3397 | "obj %p target %d offset %d " | 3334 | "obj %p target %d offset %d " |
3398 | "read %08x write %08x", | 3335 | "read %08x write %08x", |
3399 | obj, reloc->target_handle, | 3336 | obj, reloc.target_handle, |
3400 | (int) reloc->offset, | 3337 | (int) reloc.offset, |
3401 | reloc->read_domains, | 3338 | reloc.read_domains, |
3402 | reloc->write_domain); | 3339 | reloc.write_domain); |
3403 | drm_gem_object_unreference(target_obj); | 3340 | ret = -EINVAL; |
3404 | i915_gem_object_unpin(obj); | 3341 | break; |
3405 | return -EINVAL; | ||
3406 | } | 3342 | } |
3407 | if (reloc->write_domain && target_obj->pending_write_domain && | 3343 | if (reloc.write_domain && target_obj->pending_write_domain && |
3408 | reloc->write_domain != target_obj->pending_write_domain) { | 3344 | reloc.write_domain != target_obj->pending_write_domain) { |
3409 | DRM_ERROR("Write domain conflict: " | 3345 | DRM_ERROR("Write domain conflict: " |
3410 | "obj %p target %d offset %d " | 3346 | "obj %p target %d offset %d " |
3411 | "new %08x old %08x\n", | 3347 | "new %08x old %08x\n", |
3412 | obj, reloc->target_handle, | 3348 | obj, reloc.target_handle, |
3413 | (int) reloc->offset, | 3349 | (int) reloc.offset, |
3414 | reloc->write_domain, | 3350 | reloc.write_domain, |
3415 | target_obj->pending_write_domain); | 3351 | target_obj->pending_write_domain); |
3416 | drm_gem_object_unreference(target_obj); | 3352 | ret = -EINVAL; |
3417 | i915_gem_object_unpin(obj); | 3353 | break; |
3418 | return -EINVAL; | ||
3419 | } | 3354 | } |
3420 | 3355 | ||
3421 | target_obj->pending_read_domains |= reloc->read_domains; | 3356 | target_obj->pending_read_domains |= reloc.read_domains; |
3422 | target_obj->pending_write_domain |= reloc->write_domain; | 3357 | target_obj->pending_write_domain |= reloc.write_domain; |
3423 | 3358 | ||
3424 | /* If the relocation already has the right value in it, no | 3359 | /* If the relocation already has the right value in it, no |
3425 | * more work needs to be done. | 3360 | * more work needs to be done. |
3426 | */ | 3361 | */ |
3427 | if (target_obj_priv->gtt_offset == reloc->presumed_offset) { | 3362 | if (target_offset == reloc.presumed_offset) |
3428 | drm_gem_object_unreference(target_obj); | ||
3429 | continue; | 3363 | continue; |
3430 | } | ||
3431 | 3364 | ||
3432 | /* Check that the relocation address is valid... */ | 3365 | /* Check that the relocation address is valid... */ |
3433 | if (reloc->offset > obj->size - 4) { | 3366 | if (reloc.offset > obj->base.size - 4) { |
3434 | DRM_ERROR("Relocation beyond object bounds: " | 3367 | DRM_ERROR("Relocation beyond object bounds: " |
3435 | "obj %p target %d offset %d size %d.\n", | 3368 | "obj %p target %d offset %d size %d.\n", |
3436 | obj, reloc->target_handle, | 3369 | obj, reloc.target_handle, |
3437 | (int) reloc->offset, (int) obj->size); | 3370 | (int) reloc.offset, (int) obj->base.size); |
3438 | drm_gem_object_unreference(target_obj); | 3371 | ret = -EINVAL; |
3439 | i915_gem_object_unpin(obj); | 3372 | break; |
3440 | return -EINVAL; | ||
3441 | } | 3373 | } |
3442 | if (reloc->offset & 3) { | 3374 | if (reloc.offset & 3) { |
3443 | DRM_ERROR("Relocation not 4-byte aligned: " | 3375 | DRM_ERROR("Relocation not 4-byte aligned: " |
3444 | "obj %p target %d offset %d.\n", | 3376 | "obj %p target %d offset %d.\n", |
3445 | obj, reloc->target_handle, | 3377 | obj, reloc.target_handle, |
3446 | (int) reloc->offset); | 3378 | (int) reloc.offset); |
3447 | drm_gem_object_unreference(target_obj); | 3379 | ret = -EINVAL; |
3448 | i915_gem_object_unpin(obj); | 3380 | break; |
3449 | return -EINVAL; | ||
3450 | } | 3381 | } |
3451 | 3382 | ||
3452 | /* and points to somewhere within the target object. */ | 3383 | /* and points to somewhere within the target object. */ |
3453 | if (reloc->delta >= target_obj->size) { | 3384 | if (reloc.delta >= target_obj->size) { |
3454 | DRM_ERROR("Relocation beyond target object bounds: " | 3385 | DRM_ERROR("Relocation beyond target object bounds: " |
3455 | "obj %p target %d delta %d size %d.\n", | 3386 | "obj %p target %d delta %d size %d.\n", |
3456 | obj, reloc->target_handle, | 3387 | obj, reloc.target_handle, |
3457 | (int) reloc->delta, (int) target_obj->size); | 3388 | (int) reloc.delta, (int) target_obj->size); |
3458 | drm_gem_object_unreference(target_obj); | 3389 | ret = -EINVAL; |
3459 | i915_gem_object_unpin(obj); | 3390 | break; |
3460 | return -EINVAL; | ||
3461 | } | 3391 | } |
3462 | 3392 | ||
3463 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 3393 | reloc.delta += target_offset; |
3464 | if (ret != 0) { | 3394 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { |
3465 | drm_gem_object_unreference(target_obj); | 3395 | uint32_t page_offset = reloc.offset & ~PAGE_MASK; |
3466 | i915_gem_object_unpin(obj); | 3396 | char *vaddr; |
3467 | return ret; | 3397 | |
3398 | vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0); | ||
3399 | *(uint32_t *)(vaddr + page_offset) = reloc.delta; | ||
3400 | kunmap_atomic(vaddr, KM_USER0); | ||
3401 | } else { | ||
3402 | uint32_t __iomem *reloc_entry; | ||
3403 | void __iomem *reloc_page; | ||
3404 | |||
3405 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); | ||
3406 | if (ret) | ||
3407 | break; | ||
3408 | |||
3409 | /* Map the page containing the relocation we're going to perform. */ | ||
3410 | reloc.offset += obj->gtt_offset; | ||
3411 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
3412 | reloc.offset & PAGE_MASK, | ||
3413 | KM_USER0); | ||
3414 | reloc_entry = (uint32_t __iomem *) | ||
3415 | (reloc_page + (reloc.offset & ~PAGE_MASK)); | ||
3416 | iowrite32(reloc.delta, reloc_entry); | ||
3417 | io_mapping_unmap_atomic(reloc_page, KM_USER0); | ||
3468 | } | 3418 | } |
3469 | 3419 | ||
3470 | /* Map the page containing the relocation we're going to | 3420 | /* and update the user's relocation entry */ |
3471 | * perform. | 3421 | reloc.presumed_offset = target_offset; |
3472 | */ | 3422 | if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, |
3473 | reloc_offset = obj_priv->gtt_offset + reloc->offset; | 3423 | &reloc.presumed_offset, |
3474 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 3424 | sizeof(reloc.presumed_offset))) { |
3475 | (reloc_offset & | 3425 | ret = -EFAULT; |
3476 | ~(PAGE_SIZE - 1)), | 3426 | break; |
3477 | KM_USER0); | 3427 | } |
3478 | reloc_entry = (uint32_t __iomem *)(reloc_page + | 3428 | } |
3479 | (reloc_offset & (PAGE_SIZE - 1))); | 3429 | |
3480 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; | 3430 | drm_gem_object_unreference(target_obj); |
3481 | 3431 | return ret; | |
3482 | writel(reloc_val, reloc_entry); | 3432 | } |
3483 | io_mapping_unmap_atomic(reloc_page, KM_USER0); | 3433 | |
3484 | 3434 | static int | |
3485 | /* The updated presumed offset for this entry will be | 3435 | i915_gem_execbuffer_pin(struct drm_device *dev, |
3486 | * copied back out to the user. | 3436 | struct drm_file *file, |
3487 | */ | 3437 | struct drm_gem_object **object_list, |
3488 | reloc->presumed_offset = target_obj_priv->gtt_offset; | 3438 | struct drm_i915_gem_exec_object2 *exec_list, |
3439 | int count) | ||
3440 | { | ||
3441 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3442 | int ret, i, retry; | ||
3443 | |||
3444 | /* attempt to pin all of the buffers into the GTT */ | ||
3445 | for (retry = 0; retry < 2; retry++) { | ||
3446 | ret = 0; | ||
3447 | for (i = 0; i < count; i++) { | ||
3448 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; | ||
3449 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | ||
3450 | bool need_fence = | ||
3451 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3452 | obj->tiling_mode != I915_TILING_NONE; | ||
3453 | |||
3454 | /* Check fence reg constraints and rebind if necessary */ | ||
3455 | if (need_fence && | ||
3456 | !i915_gem_object_fence_offset_ok(&obj->base, | ||
3457 | obj->tiling_mode)) { | ||
3458 | ret = i915_gem_object_unbind(&obj->base); | ||
3459 | if (ret) | ||
3460 | break; | ||
3461 | } | ||
3462 | |||
3463 | ret = i915_gem_object_pin(&obj->base, entry->alignment); | ||
3464 | if (ret) | ||
3465 | break; | ||
3466 | |||
3467 | /* | ||
3468 | * Pre-965 chips need a fence register set up in order | ||
3469 | * to properly handle blits to/from tiled surfaces. | ||
3470 | */ | ||
3471 | if (need_fence) { | ||
3472 | ret = i915_gem_object_get_fence_reg(&obj->base, true); | ||
3473 | if (ret) { | ||
3474 | i915_gem_object_unpin(&obj->base); | ||
3475 | break; | ||
3476 | } | ||
3489 | 3477 | ||
3490 | drm_gem_object_unreference(target_obj); | 3478 | dev_priv->fence_regs[obj->fence_reg].gpu = true; |
3479 | } | ||
3480 | |||
3481 | entry->offset = obj->gtt_offset; | ||
3482 | } | ||
3483 | |||
3484 | while (i--) | ||
3485 | i915_gem_object_unpin(object_list[i]); | ||
3486 | |||
3487 | if (ret == 0) | ||
3488 | break; | ||
3489 | |||
3490 | if (ret != -ENOSPC || retry) | ||
3491 | return ret; | ||
3492 | |||
3493 | ret = i915_gem_evict_everything(dev); | ||
3494 | if (ret) | ||
3495 | return ret; | ||
3491 | } | 3496 | } |
3492 | 3497 | ||
3493 | return 0; | 3498 | return 0; |
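
i915_gem_execbuffer_pin() above separates pinning from relocation: it tries to pin every buffer, rolls back whatever it managed, and retries exactly once after evicting the whole aperture on -ENOSPC. A stand-alone model of that two-pass loop (stub pin/evict functions; names are illustrative):

#include <errno.h>

struct buf { int pinned; };

static int  try_pin(struct buf *b)  { b->pinned = 1; return 0; }
static void unpin(struct buf *b)    { b->pinned = 0; }
static int  evict_everything(void)  { return 0; }

static int pin_all(struct buf *bufs, int count)
{
        int retry, i, ret;

        for (retry = 0; retry < 2; retry++) {
                ret = 0;
                for (i = 0; i < count; i++) {
                        ret = try_pin(&bufs[i]);
                        if (ret)
                                break;
                }

                /* drop the temporary pins whether or not the pass
                 * succeeded, as the hunk above does; in GEM the
                 * binding itself persists after an unpin */
                while (i--)
                        unpin(&bufs[i]);

                if (ret == 0)
                        break;
                if (ret != -ENOSPC || retry)    /* hard error, or 2nd try */
                        return ret;

                ret = evict_everything();       /* make room, then retry */
                if (ret)
                        return ret;
        }
        return 0;
}
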
@@ -3551,86 +3556,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3551 | } | 3556 | } |
3552 | 3557 | ||
3553 | static int | 3558 | static int |
3554 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, | 3559 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, |
3555 | uint32_t buffer_count, | 3560 | uint64_t exec_offset) |
3556 | struct drm_i915_gem_relocation_entry **relocs) | ||
3557 | { | ||
3558 | uint32_t reloc_count = 0, reloc_index = 0, i; | ||
3559 | int ret; | ||
3560 | |||
3561 | *relocs = NULL; | ||
3562 | for (i = 0; i < buffer_count; i++) { | ||
3563 | if (reloc_count + exec_list[i].relocation_count < reloc_count) | ||
3564 | return -EINVAL; | ||
3565 | reloc_count += exec_list[i].relocation_count; | ||
3566 | } | ||
3567 | |||
3568 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | ||
3569 | if (*relocs == NULL) { | ||
3570 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | ||
3571 | return -ENOMEM; | ||
3572 | } | ||
3573 | |||
3574 | for (i = 0; i < buffer_count; i++) { | ||
3575 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3576 | |||
3577 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
3578 | |||
3579 | ret = copy_from_user(&(*relocs)[reloc_index], | ||
3580 | user_relocs, | ||
3581 | exec_list[i].relocation_count * | ||
3582 | sizeof(**relocs)); | ||
3583 | if (ret != 0) { | ||
3584 | drm_free_large(*relocs); | ||
3585 | *relocs = NULL; | ||
3586 | return -EFAULT; | ||
3587 | } | ||
3588 | |||
3589 | reloc_index += exec_list[i].relocation_count; | ||
3590 | } | ||
3591 | |||
3592 | return 0; | ||
3593 | } | ||
3594 | |||
3595 | static int | ||
3596 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, | ||
3597 | uint32_t buffer_count, | ||
3598 | struct drm_i915_gem_relocation_entry *relocs) | ||
3599 | { | ||
3600 | uint32_t reloc_count = 0, i; | ||
3601 | int ret = 0; | ||
3602 | |||
3603 | if (relocs == NULL) | ||
3604 | return 0; | ||
3605 | |||
3606 | for (i = 0; i < buffer_count; i++) { | ||
3607 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3608 | int unwritten; | ||
3609 | |||
3610 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
3611 | |||
3612 | unwritten = copy_to_user(user_relocs, | ||
3613 | &relocs[reloc_count], | ||
3614 | exec_list[i].relocation_count * | ||
3615 | sizeof(*relocs)); | ||
3616 | |||
3617 | if (unwritten) { | ||
3618 | ret = -EFAULT; | ||
3619 | goto err; | ||
3620 | } | ||
3621 | |||
3622 | reloc_count += exec_list[i].relocation_count; | ||
3623 | } | ||
3624 | |||
3625 | err: | ||
3626 | drm_free_large(relocs); | ||
3627 | |||
3628 | return ret; | ||
3629 | } | ||
3630 | |||
3631 | static int | ||
3632 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, | ||
3633 | uint64_t exec_offset) | ||
3634 | { | 3561 | { |
3635 | uint32_t exec_start, exec_len; | 3562 | uint32_t exec_start, exec_len; |
3636 | 3563 | ||
@@ -3647,43 +3574,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, | |||
3647 | } | 3574 | } |
3648 | 3575 | ||
3649 | static int | 3576 | static int |
3650 | i915_gem_wait_for_pending_flip(struct drm_device *dev, | 3577 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, |
3651 | struct drm_gem_object **object_list, | 3578 | int count) |
3652 | int count) | ||
3653 | { | 3579 | { |
3654 | drm_i915_private_t *dev_priv = dev->dev_private; | 3580 | int i; |
3655 | struct drm_i915_gem_object *obj_priv; | ||
3656 | DEFINE_WAIT(wait); | ||
3657 | int i, ret = 0; | ||
3658 | 3581 | ||
3659 | for (;;) { | 3582 | for (i = 0; i < count; i++) { |
3660 | prepare_to_wait(&dev_priv->pending_flip_queue, | 3583 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
3661 | &wait, TASK_INTERRUPTIBLE); | 3584 | size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); |
3662 | for (i = 0; i < count; i++) { | ||
3663 | obj_priv = to_intel_bo(object_list[i]); | ||
3664 | if (atomic_read(&obj_priv->pending_flip) > 0) | ||
3665 | break; | ||
3666 | } | ||
3667 | if (i == count) | ||
3668 | break; | ||
3669 | 3585 | ||
3670 | if (!signal_pending(current)) { | 3586 | if (!access_ok(VERIFY_READ, ptr, length)) |
3671 | mutex_unlock(&dev->struct_mutex); | 3587 | return -EFAULT; |
3672 | schedule(); | 3588 | |
3673 | mutex_lock(&dev->struct_mutex); | 3589 | /* we may also need to update the presumed offsets */ |
3674 | continue; | 3590 | if (!access_ok(VERIFY_WRITE, ptr, length)) |
3675 | } | 3591 | return -EFAULT; |
3676 | ret = -ERESTARTSYS; | 3592 | |
3677 | break; | 3593 | if (fault_in_pages_readable(ptr, length)) |
3594 | return -EFAULT; | ||
3678 | } | 3595 | } |
3679 | finish_wait(&dev_priv->pending_flip_queue, &wait); | ||
3680 | 3596 | ||
3681 | return ret; | 3597 | return 0; |
3682 | } | 3598 | } |
3683 | 3599 | ||
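
validate_exec_list() runs before any locks are taken: each relocation array is checked with access_ok() for both read and write (the presumed offsets are written back later) and prefaulted with fault_in_pages_readable(), so the __copy_from_user_inatomic()/__copy_to_user_inatomic() calls made later under struct_mutex rarely fault. A user-space model of the check, assuming 4096-byte pages and a simplified entry layout (hypothetical names):

#include <errno.h>
#include <stddef.h>

struct exec_entry {
        const char *relocs;         /* stands in for relocs_ptr */
        size_t relocation_count;
        size_t reloc_size;
};

/* stands in for fault_in_pages_readable(): touch a byte per page */
static int prefault(const char *p, size_t len)
{
        volatile char sink;
        size_t i;

        for (i = 0; i < len; i += 4096)
                sink = p[i];
        if (len)
                sink = p[len - 1];
        (void)sink;
        return 0;
}

static int validate_entries(const struct exec_entry *exec, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                size_t length = exec[i].relocation_count *
                                exec[i].reloc_size;

                if (exec[i].relocs == NULL && length)
                        return -EFAULT;     /* stands in for access_ok */
                if (prefault(exec[i].relocs, length))
                        return -EFAULT;
        }
        return 0;
}
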
3684 | static int | 3600 | static int |
3685 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 3601 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3686 | struct drm_file *file_priv, | 3602 | struct drm_file *file, |
3687 | struct drm_i915_gem_execbuffer2 *args, | 3603 | struct drm_i915_gem_execbuffer2 *args, |
3688 | struct drm_i915_gem_exec_object2 *exec_list) | 3604 | struct drm_i915_gem_exec_object2 *exec_list) |
3689 | { | 3605 | { |
@@ -3692,12 +3608,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3692 | struct drm_gem_object *batch_obj; | 3608 | struct drm_gem_object *batch_obj; |
3693 | struct drm_i915_gem_object *obj_priv; | 3609 | struct drm_i915_gem_object *obj_priv; |
3694 | struct drm_clip_rect *cliprects = NULL; | 3610 | struct drm_clip_rect *cliprects = NULL; |
3695 | struct drm_i915_gem_relocation_entry *relocs = NULL; | ||
3696 | struct drm_i915_gem_request *request = NULL; | 3611 | struct drm_i915_gem_request *request = NULL; |
3697 | int ret, ret2, i, pinned = 0; | 3612 | int ret, i, flips; |
3698 | uint64_t exec_offset; | 3613 | uint64_t exec_offset; |
3699 | uint32_t reloc_index; | ||
3700 | int pin_tries, flips; | ||
3701 | 3614 | ||
3702 | struct intel_ring_buffer *ring = NULL; | 3615 | struct intel_ring_buffer *ring = NULL; |
3703 | 3616 | ||
@@ -3705,18 +3618,37 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3705 | if (ret) | 3618 | if (ret) |
3706 | return ret; | 3619 | return ret; |
3707 | 3620 | ||
3621 | ret = validate_exec_list(exec_list, args->buffer_count); | ||
3622 | if (ret) | ||
3623 | return ret; | ||
3624 | |||
3708 | #if WATCH_EXEC | 3625 | #if WATCH_EXEC |
3709 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 3626 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", |
3710 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 3627 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); |
3711 | #endif | 3628 | #endif |
3712 | if (args->flags & I915_EXEC_BSD) { | 3629 | switch (args->flags & I915_EXEC_RING_MASK) { |
3630 | case I915_EXEC_DEFAULT: | ||
3631 | case I915_EXEC_RENDER: | ||
3632 | ring = &dev_priv->render_ring; | ||
3633 | break; | ||
3634 | case I915_EXEC_BSD: | ||
3713 | if (!HAS_BSD(dev)) { | 3635 | if (!HAS_BSD(dev)) { |
3714 | DRM_ERROR("execbuf with wrong flag\n"); | 3636 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); |
3715 | return -EINVAL; | 3637 | return -EINVAL; |
3716 | } | 3638 | } |
3717 | ring = &dev_priv->bsd_ring; | 3639 | ring = &dev_priv->bsd_ring; |
3718 | } else { | 3640 | break; |
3719 | ring = &dev_priv->render_ring; | 3641 | case I915_EXEC_BLT: |
3642 | if (!HAS_BLT(dev)) { | ||
3643 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | ||
3644 | return -EINVAL; | ||
3645 | } | ||
3646 | ring = &dev_priv->blt_ring; | ||
3647 | break; | ||
3648 | default: | ||
3649 | DRM_ERROR("execbuf with unknown ring: %d\n", | ||
3650 | (int)(args->flags & I915_EXEC_RING_MASK)); | ||
3651 | return -EINVAL; | ||
3720 | } | 3652 | } |
3721 | 3653 | ||
3722 | if (args->buffer_count < 1) { | 3654 | if (args->buffer_count < 1) { |
@@ -3757,11 +3689,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3757 | goto pre_mutex_err; | 3689 | goto pre_mutex_err; |
3758 | } | 3690 | } |
3759 | 3691 | ||
3760 | ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, | ||
3761 | &relocs); | ||
3762 | if (ret != 0) | ||
3763 | goto pre_mutex_err; | ||
3764 | |||
3765 | ret = i915_mutex_lock_interruptible(dev); | 3692 | ret = i915_mutex_lock_interruptible(dev); |
3766 | if (ret) | 3693 | if (ret) |
3767 | goto pre_mutex_err; | 3694 | goto pre_mutex_err; |
@@ -3773,9 +3700,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3773 | } | 3700 | } |
3774 | 3701 | ||
3775 | /* Look up object handles */ | 3702 | /* Look up object handles */ |
3776 | flips = 0; | ||
3777 | for (i = 0; i < args->buffer_count; i++) { | 3703 | for (i = 0; i < args->buffer_count; i++) { |
3778 | object_list[i] = drm_gem_object_lookup(dev, file_priv, | 3704 | object_list[i] = drm_gem_object_lookup(dev, file, |
3779 | exec_list[i].handle); | 3705 | exec_list[i].handle); |
3780 | if (object_list[i] == NULL) { | 3706 | if (object_list[i] == NULL) { |
3781 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3707 | DRM_ERROR("Invalid object handle %d at index %d\n", |
@@ -3796,76 +3722,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3796 | goto err; | 3722 | goto err; |
3797 | } | 3723 | } |
3798 | obj_priv->in_execbuffer = true; | 3724 | obj_priv->in_execbuffer = true; |
3799 | flips += atomic_read(&obj_priv->pending_flip); | ||
3800 | } | 3725 | } |
3801 | 3726 | ||
3802 | if (flips > 0) { | 3727 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
3803 | ret = i915_gem_wait_for_pending_flip(dev, object_list, | 3728 | ret = i915_gem_execbuffer_pin(dev, file, |
3804 | args->buffer_count); | 3729 | object_list, exec_list, |
3805 | if (ret) | 3730 | args->buffer_count); |
3806 | goto err; | 3731 | if (ret) |
3807 | } | 3732 | goto err; |
3808 | |||
3809 | /* Pin and relocate */ | ||
3810 | for (pin_tries = 0; ; pin_tries++) { | ||
3811 | ret = 0; | ||
3812 | reloc_index = 0; | ||
3813 | |||
3814 | for (i = 0; i < args->buffer_count; i++) { | ||
3815 | object_list[i]->pending_read_domains = 0; | ||
3816 | object_list[i]->pending_write_domain = 0; | ||
3817 | ret = i915_gem_object_pin_and_relocate(object_list[i], | ||
3818 | file_priv, | ||
3819 | &exec_list[i], | ||
3820 | &relocs[reloc_index]); | ||
3821 | if (ret) | ||
3822 | break; | ||
3823 | pinned = i + 1; | ||
3824 | reloc_index += exec_list[i].relocation_count; | ||
3825 | } | ||
3826 | /* success */ | ||
3827 | if (ret == 0) | ||
3828 | break; | ||
3829 | |||
3830 | /* error other than GTT full, or we've already tried again */ | ||
3831 | if (ret != -ENOSPC || pin_tries >= 1) { | ||
3832 | if (ret != -ERESTARTSYS) { | ||
3833 | unsigned long long total_size = 0; | ||
3834 | int num_fences = 0; | ||
3835 | for (i = 0; i < args->buffer_count; i++) { | ||
3836 | obj_priv = to_intel_bo(object_list[i]); | ||
3837 | |||
3838 | total_size += object_list[i]->size; | ||
3839 | num_fences += | ||
3840 | exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3841 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3842 | } | ||
3843 | DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", | ||
3844 | pinned+1, args->buffer_count, | ||
3845 | total_size, num_fences, | ||
3846 | ret); | ||
3847 | DRM_ERROR("%u objects [%u pinned, %u GTT], " | ||
3848 | "%zu object bytes [%zu pinned], " | ||
3849 | "%zu /%zu gtt bytes\n", | ||
3850 | dev_priv->mm.object_count, | ||
3851 | dev_priv->mm.pin_count, | ||
3852 | dev_priv->mm.gtt_count, | ||
3853 | dev_priv->mm.object_memory, | ||
3854 | dev_priv->mm.pin_memory, | ||
3855 | dev_priv->mm.gtt_memory, | ||
3856 | dev_priv->mm.gtt_total); | ||
3857 | } | ||
3858 | goto err; | ||
3859 | } | ||
3860 | |||
3861 | /* unpin all of our buffers */ | ||
3862 | for (i = 0; i < pinned; i++) | ||
3863 | i915_gem_object_unpin(object_list[i]); | ||
3864 | pinned = 0; | ||
3865 | 3733 | ||
3866 | /* evict everyone we can from the aperture */ | 3734 | /* The objects are in their final locations, apply the relocations. */ |
3867 | ret = i915_gem_evict_everything(dev); | 3735 | for (i = 0; i < args->buffer_count; i++) { |
3868 | if (ret && ret != -ENOSPC) | 3736 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); |
3737 | obj->base.pending_read_domains = 0; | ||
3738 | obj->base.pending_write_domain = 0; | ||
3739 | ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); | ||
3740 | if (ret) | ||
3869 | goto err; | 3741 | goto err; |
3870 | } | 3742 | } |
3871 | 3743 | ||
@@ -3878,9 +3750,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3878 | } | 3750 | } |
3879 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | 3751 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
3880 | 3752 | ||
3881 | /* Sanity check the batch buffer, prior to moving objects */ | 3753 | /* Sanity check the batch buffer */ |
3882 | exec_offset = exec_list[args->buffer_count - 1].offset; | 3754 | exec_offset = to_intel_bo(batch_obj)->gtt_offset; |
3883 | ret = i915_gem_check_execbuffer (args, exec_offset); | 3755 | ret = i915_gem_check_execbuffer(args, exec_offset); |
3884 | if (ret != 0) { | 3756 | if (ret != 0) { |
3885 | DRM_ERROR("execbuf with invalid offset/length\n"); | 3757 | DRM_ERROR("execbuf with invalid offset/length\n"); |
3886 | goto err; | 3758 | goto err; |
@@ -3898,7 +3770,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3898 | struct drm_gem_object *obj = object_list[i]; | 3770 | struct drm_gem_object *obj = object_list[i]; |
3899 | 3771 | ||
3900 | /* Compute new gpu domains and update invalidate/flush */ | 3772 | /* Compute new gpu domains and update invalidate/flush */ |
3901 | i915_gem_object_set_to_gpu_domain(obj); | 3773 | i915_gem_object_set_to_gpu_domain(obj, ring); |
3902 | } | 3774 | } |
3903 | 3775 | ||
3904 | if (dev->invalidate_domains | dev->flush_domains) { | 3776 | if (dev->invalidate_domains | dev->flush_domains) { |
@@ -3908,7 +3780,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3908 | dev->invalidate_domains, | 3780 | dev->invalidate_domains, |
3909 | dev->flush_domains); | 3781 | dev->flush_domains); |
3910 | #endif | 3782 | #endif |
3911 | i915_gem_flush(dev, file_priv, | 3783 | i915_gem_flush(dev, file, |
3912 | dev->invalidate_domains, | 3784 | dev->invalidate_domains, |
3913 | dev->flush_domains, | 3785 | dev->flush_domains, |
3914 | dev_priv->mm.flush_rings); | 3786 | dev_priv->mm.flush_rings); |
@@ -3916,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3916 | 3788 | ||
3917 | for (i = 0; i < args->buffer_count; i++) { | 3789 | for (i = 0; i < args->buffer_count; i++) { |
3918 | struct drm_gem_object *obj = object_list[i]; | 3790 | struct drm_gem_object *obj = object_list[i]; |
3919 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3920 | uint32_t old_write_domain = obj->write_domain; | 3791 | uint32_t old_write_domain = obj->write_domain; |
3921 | |||
3922 | obj->write_domain = obj->pending_write_domain; | 3792 | obj->write_domain = obj->pending_write_domain; |
3923 | if (obj->write_domain) | ||
3924 | list_move_tail(&obj_priv->gpu_write_list, | ||
3925 | &dev_priv->mm.gpu_write_list); | ||
3926 | |||
3927 | trace_i915_gem_object_change_domain(obj, | 3793 | trace_i915_gem_object_change_domain(obj, |
3928 | obj->read_domains, | 3794 | obj->read_domains, |
3929 | old_write_domain); | 3795 | old_write_domain); |
@@ -3943,9 +3809,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3943 | ~0); | 3809 | ~0); |
3944 | #endif | 3810 | #endif |
3945 | 3811 | ||
3812 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
3813 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
3814 | * to executing the batch and avoid stalling the CPU. | ||
3815 | */ | ||
3816 | flips = 0; | ||
3817 | for (i = 0; i < args->buffer_count; i++) { | ||
3818 | if (object_list[i]->write_domain) | ||
3819 | flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); | ||
3820 | } | ||
3821 | if (flips) { | ||
3822 | int plane, flip_mask; | ||
3823 | |||
3824 | for (plane = 0; flips >> plane; plane++) { | ||
3825 | if (((flips >> plane) & 1) == 0) | ||
3826 | continue; | ||
3827 | |||
3828 | if (plane) | ||
3829 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
3830 | else | ||
3831 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
3832 | |||
3833 | intel_ring_begin(dev, ring, 2); | ||
3834 | intel_ring_emit(dev, ring, | ||
3835 | MI_WAIT_FOR_EVENT | flip_mask); | ||
3836 | intel_ring_emit(dev, ring, MI_NOOP); | ||
3837 | intel_ring_advance(dev, ring); | ||
3838 | } | ||
3839 | } | ||
3840 | |||
3946 | /* Exec the batchbuffer */ | 3841 | /* Exec the batchbuffer */ |
3947 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, | 3842 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, |
3948 | cliprects, exec_offset); | 3843 | cliprects, exec_offset); |
3949 | if (ret) { | 3844 | if (ret) { |
3950 | DRM_ERROR("dispatch failed %d\n", ret); | 3845 | DRM_ERROR("dispatch failed %d\n", ret); |
3951 | goto err; | 3846 | goto err; |
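The flip-wait block added above walks the set bits of flips and emits one MI_WAIT_FOR_EVENT per plane with a pending flip, which keeps the flip queue depth of 1 honest without stalling the CPU. The bit walk in isolation, as a standalone sketch (the MI_* values below are placeholders; the real ones live in i915_reg.h):

#include <stdio.h>

#define MI_WAIT_FOR_PLANE_A_FLIP (1u << 2)   /* placeholder values */
#define MI_WAIT_FOR_PLANE_B_FLIP (1u << 6)

static void emit_flip_waits(unsigned int flips)
{
        int plane;

        for (plane = 0; flips >> plane; plane++) {
                if (((flips >> plane) & 1) == 0)
                        continue;
                printf("WAIT_FOR_EVENT | 0x%x  (plane %d)\n",
                       plane ? MI_WAIT_FOR_PLANE_B_FLIP
                             : MI_WAIT_FOR_PLANE_A_FLIP,
                       plane);
        }
}

int main(void)
{
        emit_flip_waits(0x3);   /* both planes have a flip pending */
        return 0;
}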
@@ -3959,18 +3854,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3959 | 3854 | ||
3960 | for (i = 0; i < args->buffer_count; i++) { | 3855 | for (i = 0; i < args->buffer_count; i++) { |
3961 | struct drm_gem_object *obj = object_list[i]; | 3856 | struct drm_gem_object *obj = object_list[i]; |
3962 | obj_priv = to_intel_bo(obj); | ||
3963 | 3857 | ||
3964 | i915_gem_object_move_to_active(obj, ring); | 3858 | i915_gem_object_move_to_active(obj, ring); |
3859 | if (obj->write_domain) | ||
3860 | list_move_tail(&to_intel_bo(obj)->gpu_write_list, | ||
3861 | &ring->gpu_write_list); | ||
3965 | } | 3862 | } |
3966 | 3863 | ||
3967 | i915_add_request(dev, file_priv, request, ring); | 3864 | i915_add_request(dev, file, request, ring); |
3968 | request = NULL; | 3865 | request = NULL; |
3969 | 3866 | ||
3970 | err: | 3867 | err: |
3971 | for (i = 0; i < pinned; i++) | ||
3972 | i915_gem_object_unpin(object_list[i]); | ||
3973 | |||
3974 | for (i = 0; i < args->buffer_count; i++) { | 3868 | for (i = 0; i < args->buffer_count; i++) { |
3975 | if (object_list[i]) { | 3869 | if (object_list[i]) { |
3976 | obj_priv = to_intel_bo(object_list[i]); | 3870 | obj_priv = to_intel_bo(object_list[i]); |
@@ -3982,20 +3876,6 @@ err: | |||
3982 | mutex_unlock(&dev->struct_mutex); | 3876 | mutex_unlock(&dev->struct_mutex); |
3983 | 3877 | ||
3984 | pre_mutex_err: | 3878 | pre_mutex_err: |
3985 | /* Copy the updated relocations out regardless of current error | ||
3986 | * state. Failure to update the relocs would mean that the next | ||
3987 | * time userland calls execbuf, it would do so with presumed offset | ||
3988 | * state that didn't match the actual object state. | ||
3989 | */ | ||
3990 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3991 | relocs); | ||
3992 | if (ret2 != 0) { | ||
3993 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3994 | |||
3995 | if (ret == 0) | ||
3996 | ret = ret2; | ||
3997 | } | ||
3998 | |||
3999 | drm_free_large(object_list); | 3879 | drm_free_large(object_list); |
4000 | kfree(cliprects); | 3880 | kfree(cliprects); |
4001 | kfree(request); | 3881 | kfree(request); |
@@ -4187,7 +4067,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
4187 | if (obj_priv->pin_count == 1) { | 4067 | if (obj_priv->pin_count == 1) { |
4188 | i915_gem_info_add_pin(dev_priv, obj->size); | 4068 | i915_gem_info_add_pin(dev_priv, obj->size); |
4189 | if (!obj_priv->active) | 4069 | if (!obj_priv->active) |
4190 | list_move_tail(&obj_priv->list, | 4070 | list_move_tail(&obj_priv->mm_list, |
4191 | &dev_priv->mm.pinned_list); | 4071 | &dev_priv->mm.pinned_list); |
4192 | } | 4072 | } |
4193 | 4073 | ||
@@ -4213,7 +4093,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
4213 | */ | 4093 | */ |
4214 | if (obj_priv->pin_count == 0) { | 4094 | if (obj_priv->pin_count == 0) { |
4215 | if (!obj_priv->active) | 4095 | if (!obj_priv->active) |
4216 | list_move_tail(&obj_priv->list, | 4096 | list_move_tail(&obj_priv->mm_list, |
4217 | &dev_priv->mm.inactive_list); | 4097 | &dev_priv->mm.inactive_list); |
4218 | i915_gem_info_remove_pin(dev_priv, obj->size); | 4098 | i915_gem_info_remove_pin(dev_priv, obj->size); |
4219 | } | 4099 | } |
@@ -4229,44 +4109,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4229 | struct drm_i915_gem_object *obj_priv; | 4109 | struct drm_i915_gem_object *obj_priv; |
4230 | int ret; | 4110 | int ret; |
4231 | 4111 | ||
4112 | ret = i915_mutex_lock_interruptible(dev); | ||
4113 | if (ret) | ||
4114 | return ret; | ||
4115 | |||
4232 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4116 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4233 | if (obj == NULL) { | 4117 | if (obj == NULL) { |
4234 | DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", | 4118 | ret = -ENOENT; |
4235 | args->handle); | 4119 | goto unlock; |
4236 | return -ENOENT; | ||
4237 | } | 4120 | } |
4238 | obj_priv = to_intel_bo(obj); | 4121 | obj_priv = to_intel_bo(obj); |
4239 | 4122 | ||
4240 | ret = i915_mutex_lock_interruptible(dev); | ||
4241 | if (ret) { | ||
4242 | drm_gem_object_unreference_unlocked(obj); | ||
4243 | return ret; | ||
4244 | } | ||
4245 | |||
4246 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4123 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
4247 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4124 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
4248 | drm_gem_object_unreference(obj); | 4125 | ret = -EINVAL; |
4249 | mutex_unlock(&dev->struct_mutex); | 4126 | goto out; |
4250 | return -EINVAL; | ||
4251 | } | 4127 | } |
4252 | 4128 | ||
4253 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 4129 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { |
4254 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 4130 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
4255 | args->handle); | 4131 | args->handle); |
4256 | drm_gem_object_unreference(obj); | 4132 | ret = -EINVAL; |
4257 | mutex_unlock(&dev->struct_mutex); | 4133 | goto out; |
4258 | return -EINVAL; | ||
4259 | } | 4134 | } |
4260 | 4135 | ||
4261 | obj_priv->user_pin_count++; | 4136 | obj_priv->user_pin_count++; |
4262 | obj_priv->pin_filp = file_priv; | 4137 | obj_priv->pin_filp = file_priv; |
4263 | if (obj_priv->user_pin_count == 1) { | 4138 | if (obj_priv->user_pin_count == 1) { |
4264 | ret = i915_gem_object_pin(obj, args->alignment); | 4139 | ret = i915_gem_object_pin(obj, args->alignment); |
4265 | if (ret != 0) { | 4140 | if (ret) |
4266 | drm_gem_object_unreference(obj); | 4141 | goto out; |
4267 | mutex_unlock(&dev->struct_mutex); | ||
4268 | return ret; | ||
4269 | } | ||
4270 | } | 4142 | } |
4271 | 4143 | ||
4272 | /* XXX - flush the CPU caches for pinned objects | 4144 | /* XXX - flush the CPU caches for pinned objects |
@@ -4274,10 +4146,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4274 | */ | 4146 | */ |
4275 | i915_gem_object_flush_cpu_write_domain(obj); | 4147 | i915_gem_object_flush_cpu_write_domain(obj); |
4276 | args->offset = obj_priv->gtt_offset; | 4148 | args->offset = obj_priv->gtt_offset; |
4149 | out: | ||
4277 | drm_gem_object_unreference(obj); | 4150 | drm_gem_object_unreference(obj); |
4151 | unlock: | ||
4278 | mutex_unlock(&dev->struct_mutex); | 4152 | mutex_unlock(&dev->struct_mutex); |
4279 | 4153 | return ret; | |
4280 | return 0; | ||
4281 | } | 4154 | } |
4282 | 4155 | ||
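The ioctl rework here (repeated for unpin, busy, and madvise below) replaces unlock-and-return on every error with lock-first plus two exit labels: unlock when nothing has been referenced yet, out once the lookup reference must be dropped. The shape in miniature (illustrative userspace rendition, not the kernel API):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

struct object { int refs; int pinned; };
static struct object the_obj;

static struct object *lookup(int handle)    /* takes a reference */
{
        if (handle != 42)
                return NULL;
        the_obj.refs++;
        return &the_obj;
}

static int pin_ioctl(int handle)
{
        struct object *obj;
        int ret = 0;

        pthread_mutex_lock(&dev_mutex);
        obj = lookup(handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;            /* nothing to unreference yet */
        }
        if (obj->pinned) {              /* e.g. pinned by someone else */
                ret = -EINVAL;
                goto out;
        }
        obj->pinned++;
out:
        obj->refs--;                    /* drop the lookup reference */
unlock:
        pthread_mutex_unlock(&dev_mutex);
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", pin_ioctl(42), pin_ioctl(42), pin_ioctl(7));
        return 0;
}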
4283 | int | 4156 | int |
@@ -4289,27 +4162,22 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
4289 | struct drm_i915_gem_object *obj_priv; | 4162 | struct drm_i915_gem_object *obj_priv; |
4290 | int ret; | 4163 | int ret; |
4291 | 4164 | ||
4165 | ret = i915_mutex_lock_interruptible(dev); | ||
4166 | if (ret) | ||
4167 | return ret; | ||
4168 | |||
4292 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4169 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4293 | if (obj == NULL) { | 4170 | if (obj == NULL) { |
4294 | DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", | 4171 | ret = -ENOENT; |
4295 | args->handle); | 4172 | goto unlock; |
4296 | return -ENOENT; | ||
4297 | } | 4173 | } |
4298 | |||
4299 | obj_priv = to_intel_bo(obj); | 4174 | obj_priv = to_intel_bo(obj); |
4300 | 4175 | ||
4301 | ret = i915_mutex_lock_interruptible(dev); | ||
4302 | if (ret) { | ||
4303 | drm_gem_object_unreference_unlocked(obj); | ||
4304 | return ret; | ||
4305 | } | ||
4306 | |||
4307 | if (obj_priv->pin_filp != file_priv) { | 4176 | if (obj_priv->pin_filp != file_priv) { |
4308 | DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n", | 4177 | DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n", |
4309 | args->handle); | 4178 | args->handle); |
4310 | drm_gem_object_unreference(obj); | 4179 | ret = -EINVAL; |
4311 | mutex_unlock(&dev->struct_mutex); | 4180 | goto out; |
4312 | return -EINVAL; | ||
4313 | } | 4181 | } |
4314 | obj_priv->user_pin_count--; | 4182 | obj_priv->user_pin_count--; |
4315 | if (obj_priv->user_pin_count == 0) { | 4183 | if (obj_priv->user_pin_count == 0) { |
@@ -4317,9 +4185,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
4317 | i915_gem_object_unpin(obj); | 4185 | i915_gem_object_unpin(obj); |
4318 | } | 4186 | } |
4319 | 4187 | ||
4188 | out: | ||
4320 | drm_gem_object_unreference(obj); | 4189 | drm_gem_object_unreference(obj); |
4190 | unlock: | ||
4321 | mutex_unlock(&dev->struct_mutex); | 4191 | mutex_unlock(&dev->struct_mutex); |
4322 | return 0; | 4192 | return ret; |
4323 | } | 4193 | } |
4324 | 4194 | ||
4325 | int | 4195 | int |
@@ -4331,25 +4201,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4331 | struct drm_i915_gem_object *obj_priv; | 4201 | struct drm_i915_gem_object *obj_priv; |
4332 | int ret; | 4202 | int ret; |
4333 | 4203 | ||
4334 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
4335 | if (obj == NULL) { | ||
4336 | DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", | ||
4337 | args->handle); | ||
4338 | return -ENOENT; | ||
4339 | } | ||
4340 | |||
4341 | ret = i915_mutex_lock_interruptible(dev); | 4204 | ret = i915_mutex_lock_interruptible(dev); |
4342 | if (ret) { | 4205 | if (ret) |
4343 | drm_gem_object_unreference_unlocked(obj); | ||
4344 | return ret; | 4206 | return ret; |
4207 | |||
4208 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
4209 | if (obj == NULL) { | ||
4210 | ret = -ENOENT; | ||
4211 | goto unlock; | ||
4345 | } | 4212 | } |
4213 | obj_priv = to_intel_bo(obj); | ||
4346 | 4214 | ||
4347 | /* Count all active objects as busy, even if they are currently not used | 4215 | /* Count all active objects as busy, even if they are currently not used |
4348 | * by the gpu. Users of this interface expect objects to eventually | 4216 | * by the gpu. Users of this interface expect objects to eventually |
4349 | * become non-busy without any further actions, therefore emit any | 4217 | * become non-busy without any further actions, therefore emit any |
4350 | * necessary flushes here. | 4218 | * necessary flushes here. |
4351 | */ | 4219 | */ |
4352 | obj_priv = to_intel_bo(obj); | ||
4353 | args->busy = obj_priv->active; | 4220 | args->busy = obj_priv->active; |
4354 | if (args->busy) { | 4221 | if (args->busy) { |
4355 | /* Unconditionally flush objects, even when the gpu still uses this | 4222 | /* Unconditionally flush objects, even when the gpu still uses this |
@@ -4373,8 +4240,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4373 | } | 4240 | } |
4374 | 4241 | ||
4375 | drm_gem_object_unreference(obj); | 4242 | drm_gem_object_unreference(obj); |
4243 | unlock: | ||
4376 | mutex_unlock(&dev->struct_mutex); | 4244 | mutex_unlock(&dev->struct_mutex); |
4377 | return 0; | 4245 | return ret; |
4378 | } | 4246 | } |
4379 | 4247 | ||
4380 | int | 4248 | int |
@@ -4401,26 +4269,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4401 | return -EINVAL; | 4269 | return -EINVAL; |
4402 | } | 4270 | } |
4403 | 4271 | ||
4272 | ret = i915_mutex_lock_interruptible(dev); | ||
4273 | if (ret) | ||
4274 | return ret; | ||
4275 | |||
4404 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4276 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4405 | if (obj == NULL) { | 4277 | if (obj == NULL) { |
4406 | DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", | 4278 | ret = -ENOENT; |
4407 | args->handle); | 4279 | goto unlock; |
4408 | return -ENOENT; | ||
4409 | } | 4280 | } |
4410 | obj_priv = to_intel_bo(obj); | 4281 | obj_priv = to_intel_bo(obj); |
4411 | 4282 | ||
4412 | ret = i915_mutex_lock_interruptible(dev); | ||
4413 | if (ret) { | ||
4414 | drm_gem_object_unreference_unlocked(obj); | ||
4415 | return ret; | ||
4416 | } | ||
4417 | |||
4418 | if (obj_priv->pin_count) { | 4283 | if (obj_priv->pin_count) { |
4419 | drm_gem_object_unreference(obj); | 4284 | ret = -EINVAL; |
4420 | mutex_unlock(&dev->struct_mutex); | 4285 | goto out; |
4421 | |||
4422 | DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); | ||
4423 | return -EINVAL; | ||
4424 | } | 4286 | } |
4425 | 4287 | ||
4426 | if (obj_priv->madv != __I915_MADV_PURGED) | 4288 | if (obj_priv->madv != __I915_MADV_PURGED) |
@@ -4433,10 +4295,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4433 | 4295 | ||
4434 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | 4296 | args->retained = obj_priv->madv != __I915_MADV_PURGED; |
4435 | 4297 | ||
4298 | out: | ||
4436 | drm_gem_object_unreference(obj); | 4299 | drm_gem_object_unreference(obj); |
4300 | unlock: | ||
4437 | mutex_unlock(&dev->struct_mutex); | 4301 | mutex_unlock(&dev->struct_mutex); |
4438 | 4302 | return ret; | |
4439 | return 0; | ||
4440 | } | 4303 | } |
4441 | 4304 | ||
4442 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 4305 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, |
@@ -4462,12 +4325,11 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | |||
4462 | obj->agp_type = AGP_USER_MEMORY; | 4325 | obj->agp_type = AGP_USER_MEMORY; |
4463 | obj->base.driver_private = NULL; | 4326 | obj->base.driver_private = NULL; |
4464 | obj->fence_reg = I915_FENCE_REG_NONE; | 4327 | obj->fence_reg = I915_FENCE_REG_NONE; |
4465 | INIT_LIST_HEAD(&obj->list); | 4328 | INIT_LIST_HEAD(&obj->mm_list); |
4329 | INIT_LIST_HEAD(&obj->ring_list); | ||
4466 | INIT_LIST_HEAD(&obj->gpu_write_list); | 4330 | INIT_LIST_HEAD(&obj->gpu_write_list); |
4467 | obj->madv = I915_MADV_WILLNEED; | 4331 | obj->madv = I915_MADV_WILLNEED; |
4468 | 4332 | ||
4469 | trace_i915_gem_object_create(&obj->base); | ||
4470 | |||
4471 | return &obj->base; | 4333 | return &obj->base; |
4472 | } | 4334 | } |
4473 | 4335 | ||
@@ -4487,7 +4349,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) | |||
4487 | 4349 | ||
4488 | ret = i915_gem_object_unbind(obj); | 4350 | ret = i915_gem_object_unbind(obj); |
4489 | if (ret == -ERESTARTSYS) { | 4351 | if (ret == -ERESTARTSYS) { |
4490 | list_move(&obj_priv->list, | 4352 | list_move(&obj_priv->mm_list, |
4491 | &dev_priv->mm.deferred_free_list); | 4353 | &dev_priv->mm.deferred_free_list); |
4492 | return; | 4354 | return; |
4493 | } | 4355 | } |
@@ -4527,10 +4389,7 @@ i915_gem_idle(struct drm_device *dev) | |||
4527 | 4389 | ||
4528 | mutex_lock(&dev->struct_mutex); | 4390 | mutex_lock(&dev->struct_mutex); |
4529 | 4391 | ||
4530 | if (dev_priv->mm.suspended || | 4392 | if (dev_priv->mm.suspended) { |
4531 | (dev_priv->render_ring.gem_object == NULL) || | ||
4532 | (HAS_BSD(dev) && | ||
4533 | dev_priv->bsd_ring.gem_object == NULL)) { | ||
4534 | mutex_unlock(&dev->struct_mutex); | 4393 | mutex_unlock(&dev->struct_mutex); |
4535 | return 0; | 4394 | return 0; |
4536 | } | 4395 | } |
@@ -4651,10 +4510,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4651 | goto cleanup_render_ring; | 4510 | goto cleanup_render_ring; |
4652 | } | 4511 | } |
4653 | 4512 | ||
4513 | if (HAS_BLT(dev)) { | ||
4514 | ret = intel_init_blt_ring_buffer(dev); | ||
4515 | if (ret) | ||
4516 | goto cleanup_bsd_ring; | ||
4517 | } | ||
4518 | |||
4654 | dev_priv->next_seqno = 1; | 4519 | dev_priv->next_seqno = 1; |
4655 | 4520 | ||
4656 | return 0; | 4521 | return 0; |
4657 | 4522 | ||
4523 | cleanup_bsd_ring: | ||
4524 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | ||
4658 | cleanup_render_ring: | 4525 | cleanup_render_ring: |
4659 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 4526 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
4660 | cleanup_pipe_control: | 4527 | cleanup_pipe_control: |
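The blitter ring slots into the existing goto-unwind ladder: each successful init stage gains a matching cleanup label, and a failure at stage N unwinds stages N-1 back to 1 in reverse order. The idiom in standalone form (illustrative names):

#include <stdio.h>

static int init_render(void)  { return 0; }
static int init_bsd(void)     { return 0; }
static int init_blt(void)     { return -1; }  /* pretend BLT init fails */
static void fini_render(void) { puts("cleanup render"); }
static void fini_bsd(void)    { puts("cleanup bsd"); }

static int init_rings(void)
{
        int ret;

        ret = init_render();
        if (ret)
                return ret;
        ret = init_bsd();
        if (ret)
                goto cleanup_render;
        ret = init_blt();
        if (ret)
                goto cleanup_bsd;
        return 0;

cleanup_bsd:
        fini_bsd();
cleanup_render:
        fini_render();
        return ret;
}

int main(void) { return init_rings() ? 1 : 0; }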
@@ -4669,8 +4536,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) | |||
4669 | drm_i915_private_t *dev_priv = dev->dev_private; | 4536 | drm_i915_private_t *dev_priv = dev->dev_private; |
4670 | 4537 | ||
4671 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 4538 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
4672 | if (HAS_BSD(dev)) | 4539 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); |
4673 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 4540 | intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); |
4674 | if (HAS_PIPE_CONTROL(dev)) | 4541 | if (HAS_PIPE_CONTROL(dev)) |
4675 | i915_gem_cleanup_pipe_control(dev); | 4542 | i915_gem_cleanup_pipe_control(dev); |
4676 | } | 4543 | } |
@@ -4699,12 +4566,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4699 | return ret; | 4566 | return ret; |
4700 | } | 4567 | } |
4701 | 4568 | ||
4569 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | ||
4702 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); | 4570 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); |
4703 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); | 4571 | BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); |
4572 | BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); | ||
4704 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 4573 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
4705 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 4574 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
4706 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); | 4575 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); |
4707 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); | 4576 | BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); |
4577 | BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); | ||
4708 | mutex_unlock(&dev->struct_mutex); | 4578 | mutex_unlock(&dev->struct_mutex); |
4709 | 4579 | ||
4710 | ret = drm_irq_install(dev); | 4580 | ret = drm_irq_install(dev); |
@@ -4746,24 +4616,29 @@ i915_gem_lastclose(struct drm_device *dev) | |||
4746 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 4616 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
4747 | } | 4617 | } |
4748 | 4618 | ||
4619 | static void | ||
4620 | init_ring_lists(struct intel_ring_buffer *ring) | ||
4621 | { | ||
4622 | INIT_LIST_HEAD(&ring->active_list); | ||
4623 | INIT_LIST_HEAD(&ring->request_list); | ||
4624 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
4625 | } | ||
4626 | |||
4749 | void | 4627 | void |
4750 | i915_gem_load(struct drm_device *dev) | 4628 | i915_gem_load(struct drm_device *dev) |
4751 | { | 4629 | { |
4752 | int i; | 4630 | int i; |
4753 | drm_i915_private_t *dev_priv = dev->dev_private; | 4631 | drm_i915_private_t *dev_priv = dev->dev_private; |
4754 | 4632 | ||
4633 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | ||
4755 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4634 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4756 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
4757 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4635 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4758 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); | 4636 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); |
4759 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4637 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
4760 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | 4638 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); |
4761 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); | 4639 | init_ring_lists(&dev_priv->render_ring); |
4762 | INIT_LIST_HEAD(&dev_priv->render_ring.request_list); | 4640 | init_ring_lists(&dev_priv->bsd_ring); |
4763 | if (HAS_BSD(dev)) { | 4641 | init_ring_lists(&dev_priv->blt_ring); |
4764 | INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); | ||
4765 | INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); | ||
4766 | } | ||
4767 | for (i = 0; i < 16; i++) | 4642 | for (i = 0; i < 16; i++) |
4768 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 4643 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
4769 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 4644 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
@@ -5026,9 +4901,9 @@ i915_gpu_is_active(struct drm_device *dev) | |||
5026 | int lists_empty; | 4901 | int lists_empty; |
5027 | 4902 | ||
5028 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 4903 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && |
5029 | list_empty(&dev_priv->render_ring.active_list); | 4904 | list_empty(&dev_priv->render_ring.active_list) && |
5030 | if (HAS_BSD(dev)) | 4905 | list_empty(&dev_priv->bsd_ring.active_list) && |
5031 | lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); | 4906 | list_empty(&dev_priv->blt_ring.active_list); |
5032 | 4907 | ||
5033 | return !lists_empty; | 4908 | return !lists_empty; |
5034 | } | 4909 | } |
@@ -5050,7 +4925,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) | |||
5050 | if (mutex_trylock(&dev->struct_mutex)) { | 4925 | if (mutex_trylock(&dev->struct_mutex)) { |
5051 | list_for_each_entry(obj_priv, | 4926 | list_for_each_entry(obj_priv, |
5052 | &dev_priv->mm.inactive_list, | 4927 | &dev_priv->mm.inactive_list, |
5053 | list) | 4928 | mm_list) |
5054 | cnt++; | 4929 | cnt++; |
5055 | mutex_unlock(&dev->struct_mutex); | 4930 | mutex_unlock(&dev->struct_mutex); |
5056 | } | 4931 | } |
@@ -5076,7 +4951,7 @@ rescan: | |||
5076 | 4951 | ||
5077 | list_for_each_entry_safe(obj_priv, next_obj, | 4952 | list_for_each_entry_safe(obj_priv, next_obj, |
5078 | &dev_priv->mm.inactive_list, | 4953 | &dev_priv->mm.inactive_list, |
5079 | list) { | 4954 | mm_list) { |
5080 | if (i915_gem_object_is_purgeable(obj_priv)) { | 4955 | if (i915_gem_object_is_purgeable(obj_priv)) { |
5081 | i915_gem_object_unbind(&obj_priv->base); | 4956 | i915_gem_object_unbind(&obj_priv->base); |
5082 | if (--nr_to_scan <= 0) | 4957 | if (--nr_to_scan <= 0) |
@@ -5105,7 +4980,7 @@ rescan: | |||
5105 | 4980 | ||
5106 | list_for_each_entry_safe(obj_priv, next_obj, | 4981 | list_for_each_entry_safe(obj_priv, next_obj, |
5107 | &dev_priv->mm.inactive_list, | 4982 | &dev_priv->mm.inactive_list, |
5108 | list) { | 4983 | mm_list) { |
5109 | if (nr_to_scan > 0) { | 4984 | if (nr_to_scan > 0) { |
5110 | i915_gem_object_unbind(&obj_priv->base); | 4985 | i915_gem_object_unbind(&obj_priv->base); |
5111 | nr_to_scan--; | 4986 | nr_to_scan--; |
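Much of the churn in this file is the list to mm_list rename. It exists because an object can now sit on two lists at once: the global mm lists (active/flushing/inactive/pinned) via mm_list, and a per-ring list via ring_list. Intrusive lists make dual membership cheap, one embedded node per list. A minimal userspace rendition (illustrative, modeled on the kernel's list_head):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

struct object {
        struct list_head mm_list;    /* global: active/inactive/pinned */
        struct list_head ring_list;  /* per-ring: execution order      */
};

int main(void)
{
        struct list_head mm_active, render_active;
        struct object obj;

        list_init(&mm_active);
        list_init(&render_active);
        list_init(&obj.mm_list);
        list_init(&obj.ring_list);

        /* One object, two lists: the same move as move_to_active(). */
        list_add_tail(&obj.mm_list, &mm_active);
        list_add_tail(&obj.ring_list, &render_active);
        printf("on mm_active: %d, on render_active: %d\n",
               mm_active.next == &obj.mm_list,
               render_active.next == &obj.ring_list);
        return 0;
}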
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3d7fbf32bb1..43a4013f53f 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -31,49 +31,6 @@ | |||
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | 33 | ||
34 | static struct drm_i915_gem_object * | ||
35 | i915_gem_next_active_object(struct drm_device *dev, | ||
36 | struct list_head **render_iter, | ||
37 | struct list_head **bsd_iter) | ||
38 | { | ||
39 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
40 | struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL; | ||
41 | |||
42 | if (*render_iter != &dev_priv->render_ring.active_list) | ||
43 | render_obj = list_entry(*render_iter, | ||
44 | struct drm_i915_gem_object, | ||
45 | list); | ||
46 | |||
47 | if (HAS_BSD(dev)) { | ||
48 | if (*bsd_iter != &dev_priv->bsd_ring.active_list) | ||
49 | bsd_obj = list_entry(*bsd_iter, | ||
50 | struct drm_i915_gem_object, | ||
51 | list); | ||
52 | |||
53 | if (render_obj == NULL) { | ||
54 | *bsd_iter = (*bsd_iter)->next; | ||
55 | return bsd_obj; | ||
56 | } | ||
57 | |||
58 | if (bsd_obj == NULL) { | ||
59 | *render_iter = (*render_iter)->next; | ||
60 | return render_obj; | ||
61 | } | ||
62 | |||
63 | /* XXX can we handle seqno wrapping? */ | ||
64 | if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { | ||
65 | *render_iter = (*render_iter)->next; | ||
66 | return render_obj; | ||
67 | } else { | ||
68 | *bsd_iter = (*bsd_iter)->next; | ||
69 | return bsd_obj; | ||
70 | } | ||
71 | } else { | ||
72 | *render_iter = (*render_iter)->next; | ||
73 | return render_obj; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static bool | 34 | static bool |
78 | mark_free(struct drm_i915_gem_object *obj_priv, | 35 | mark_free(struct drm_i915_gem_object *obj_priv, |
79 | struct list_head *unwind) | 36 | struct list_head *unwind) |
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv, | |||
83 | return drm_mm_scan_add_block(obj_priv->gtt_space); | 40 | return drm_mm_scan_add_block(obj_priv->gtt_space); |
84 | } | 41 | } |
85 | 42 | ||
86 | #define i915_for_each_active_object(OBJ, R, B) \ | ||
87 | *(R) = dev_priv->render_ring.active_list.next; \ | ||
88 | *(B) = dev_priv->bsd_ring.active_list.next; \ | ||
89 | while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) | ||
90 | |||
91 | int | 43 | int |
92 | i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) | 44 | i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) |
93 | { | 45 | { |
94 | drm_i915_private_t *dev_priv = dev->dev_private; | 46 | drm_i915_private_t *dev_priv = dev->dev_private; |
95 | struct list_head eviction_list, unwind_list; | 47 | struct list_head eviction_list, unwind_list; |
96 | struct drm_i915_gem_object *obj_priv; | 48 | struct drm_i915_gem_object *obj_priv; |
97 | struct list_head *render_iter, *bsd_iter; | ||
98 | int ret = 0; | 49 | int ret = 0; |
99 | 50 | ||
100 | i915_gem_retire_requests(dev); | 51 | i915_gem_retire_requests(dev); |
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
131 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | 82 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); |
132 | 83 | ||
133 | /* First see if there is a large enough contiguous idle region... */ | 84 | /* First see if there is a large enough contiguous idle region... */ |
134 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | 85 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { |
135 | if (mark_free(obj_priv, &unwind_list)) | 86 | if (mark_free(obj_priv, &unwind_list)) |
136 | goto found; | 87 | goto found; |
137 | } | 88 | } |
138 | 89 | ||
139 | /* Now merge in the soon-to-be-expired objects... */ | 90 | /* Now merge in the soon-to-be-expired objects... */ |
140 | i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { | 91 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { |
141 | /* Does the object require an outstanding flush? */ | 92 | /* Does the object require an outstanding flush? */ |
142 | if (obj_priv->base.write_domain || obj_priv->pin_count) | 93 | if (obj_priv->base.write_domain || obj_priv->pin_count) |
143 | continue; | 94 | continue; |
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
147 | } | 98 | } |
148 | 99 | ||
149 | /* Finally add anything with a pending flush (in order of retirement) */ | 100 | /* Finally add anything with a pending flush (in order of retirement) */ |
150 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | 101 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { |
151 | if (obj_priv->pin_count) | 102 | if (obj_priv->pin_count) |
152 | continue; | 103 | continue; |
153 | 104 | ||
154 | if (mark_free(obj_priv, &unwind_list)) | 105 | if (mark_free(obj_priv, &unwind_list)) |
155 | goto found; | 106 | goto found; |
156 | } | 107 | } |
157 | i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { | 108 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { |
158 | if (!obj_priv->base.write_domain || obj_priv->pin_count) | 109 | if (!obj_priv->base.write_domain || obj_priv->pin_count) |
159 | continue; | 110 | continue; |
160 | 111 | ||
@@ -215,8 +166,8 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
215 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 166 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
216 | list_empty(&dev_priv->mm.flushing_list) && | 167 | list_empty(&dev_priv->mm.flushing_list) && |
217 | list_empty(&dev_priv->render_ring.active_list) && | 168 | list_empty(&dev_priv->render_ring.active_list) && |
218 | (!HAS_BSD(dev) | 169 | list_empty(&dev_priv->bsd_ring.active_list) && |
219 | || list_empty(&dev_priv->bsd_ring.active_list))); | 170 | list_empty(&dev_priv->blt_ring.active_list)); |
220 | if (lists_empty) | 171 | if (lists_empty) |
221 | return -ENOSPC; | 172 | return -ENOSPC; |
222 | 173 | ||
@@ -234,8 +185,8 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
234 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 185 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
235 | list_empty(&dev_priv->mm.flushing_list) && | 186 | list_empty(&dev_priv->mm.flushing_list) && |
236 | list_empty(&dev_priv->render_ring.active_list) && | 187 | list_empty(&dev_priv->render_ring.active_list) && |
237 | (!HAS_BSD(dev) | 188 | list_empty(&dev_priv->bsd_ring.active_list) && |
238 | || list_empty(&dev_priv->bsd_ring.active_list))); | 189 | list_empty(&dev_priv->blt_ring.active_list)); |
239 | BUG_ON(!lists_empty); | 190 | BUG_ON(!lists_empty); |
240 | 191 | ||
241 | return 0; | 192 | return 0; |
@@ -253,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev) | |||
253 | 204 | ||
254 | obj = &list_first_entry(&dev_priv->mm.inactive_list, | 205 | obj = &list_first_entry(&dev_priv->mm.inactive_list, |
255 | struct drm_i915_gem_object, | 206 | struct drm_i915_gem_object, |
256 | list)->base; | 207 | mm_list)->base; |
257 | 208 | ||
258 | ret = i915_gem_object_unbind(obj); | 209 | ret = i915_gem_object_unbind(obj); |
259 | if (ret != 0) { | 210 | if (ret != 0) { |
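The helper deleted at the top of this file merged the render and BSD active lists in last_rendering_seqno order, a classic two-way sorted merge with special cases for either list running dry (and an unresolved seqno-wrap caveat). With a single mm.active_list kept in retirement order, the merge disappears. What it was doing, reduced to plain arrays (illustrative):

#include <stdio.h>

static void merge_by_seqno(const unsigned *a, int na,
                           const unsigned *b, int nb)
{
        int i = 0, j = 0;

        while (i < na || j < nb) {
                if (j == nb || (i < na && a[i] < b[j]))
                        printf("render seqno %u\n", a[i++]);
                else
                        printf("bsd seqno %u\n", b[j++]);
        }
}

int main(void)
{
        const unsigned render[] = { 1, 4, 6 }, bsd[] = { 2, 3, 5 };

        merge_by_seqno(render, 3, bsd, 3);
        return 0;
}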
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 8c9ffc4768e..af352de70be 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
94 | 94 | ||
95 | if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { | 95 | if (IS_GEN5(dev) || IS_GEN6(dev)) { |
96 | /* On Ironlake, whatever the DRAM config, the GPU always does | 96 | /* On Ironlake, whatever the DRAM config, the GPU always does |
97 | * the same swizzling setup. | 97 | * the same swizzling setup. |
98 | */ | 98 | */ |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 64c07c24e30..237b8bdb599 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -293,13 +293,26 @@ static void i915_handle_rps_change(struct drm_device *dev) | |||
293 | return; | 293 | return; |
294 | } | 294 | } |
295 | 295 | ||
296 | static void notify_ring(struct drm_device *dev, | ||
297 | struct intel_ring_buffer *ring) | ||
298 | { | ||
299 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
300 | u32 seqno = ring->get_seqno(dev, ring); | ||
301 | ring->irq_gem_seqno = seqno; | ||
302 | trace_i915_gem_request_complete(dev, seqno); | ||
303 | wake_up_all(&ring->irq_queue); | ||
304 | dev_priv->hangcheck_count = 0; | ||
305 | mod_timer(&dev_priv->hangcheck_timer, | ||
306 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
307 | } | ||
308 | |||
296 | static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | 309 | static irqreturn_t ironlake_irq_handler(struct drm_device *dev) |
297 | { | 310 | { |
298 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 311 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
299 | int ret = IRQ_NONE; | 312 | int ret = IRQ_NONE; |
300 | u32 de_iir, gt_iir, de_ier, pch_iir; | 313 | u32 de_iir, gt_iir, de_ier, pch_iir; |
314 | u32 hotplug_mask; | ||
301 | struct drm_i915_master_private *master_priv; | 315 | struct drm_i915_master_private *master_priv; |
302 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
303 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; | 316 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; |
304 | 317 | ||
305 | if (IS_GEN6(dev)) | 318 | if (IS_GEN6(dev)) |
@@ -317,6 +330,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
317 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 330 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) |
318 | goto done; | 331 | goto done; |
319 | 332 | ||
333 | if (HAS_PCH_CPT(dev)) | ||
334 | hotplug_mask = SDE_HOTPLUG_MASK_CPT; | ||
335 | else | ||
336 | hotplug_mask = SDE_HOTPLUG_MASK; | ||
337 | |||
320 | ret = IRQ_HANDLED; | 338 | ret = IRQ_HANDLED; |
321 | 339 | ||
322 | if (dev->primary->master) { | 340 | if (dev->primary->master) { |
@@ -326,17 +344,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
326 | READ_BREADCRUMB(dev_priv); | 344 | READ_BREADCRUMB(dev_priv); |
327 | } | 345 | } |
328 | 346 | ||
329 | if (gt_iir & GT_PIPE_NOTIFY) { | 347 | if (gt_iir & GT_PIPE_NOTIFY) |
330 | u32 seqno = render_ring->get_seqno(dev, render_ring); | 348 | notify_ring(dev, &dev_priv->render_ring); |
331 | render_ring->irq_gem_seqno = seqno; | ||
332 | trace_i915_gem_request_complete(dev, seqno); | ||
333 | wake_up_all(&dev_priv->render_ring.irq_queue); | ||
334 | dev_priv->hangcheck_count = 0; | ||
335 | mod_timer(&dev_priv->hangcheck_timer, | ||
336 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
337 | } | ||
338 | if (gt_iir & bsd_usr_interrupt) | 349 | if (gt_iir & bsd_usr_interrupt) |
339 | wake_up_all(&dev_priv->bsd_ring.irq_queue); | 350 | notify_ring(dev, &dev_priv->bsd_ring); |
351 | if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) | ||
352 | notify_ring(dev, &dev_priv->blt_ring); | ||
340 | 353 | ||
341 | if (de_iir & DE_GSE) | 354 | if (de_iir & DE_GSE) |
342 | intel_opregion_gse_intr(dev); | 355 | intel_opregion_gse_intr(dev); |
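notify_ring() gathers what used to be open-coded once per ring: read the seqno, cache it, trace the completion, wake any waiters, and reset the hangcheck. A reduced model of the consolidation (illustrative; the real helper also re-arms dev_priv->hangcheck_timer):

#include <stdio.h>

struct ring { const char *name; unsigned last_seqno; };

static unsigned read_seqno(struct ring *r) { return r->last_seqno + 1; }

static void notify_ring(struct ring *r)
{
        r->last_seqno = read_seqno(r);   /* cache the completed seqno */
        printf("wake waiters on %s, seqno %u\n", r->name, r->last_seqno);
        /* ...then reset the hangcheck count and re-arm its timer. */
}

int main(void)
{
        struct ring render = { "render", 0 }, bsd = { "bsd", 0 };

        notify_ring(&render);   /* GT_PIPE_NOTIFY */
        notify_ring(&bsd);      /* BSD user interrupt */
        return 0;
}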
@@ -358,10 +371,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
358 | drm_handle_vblank(dev, 1); | 371 | drm_handle_vblank(dev, 1); |
359 | 372 | ||
360 | /* check event from PCH */ | 373 | /* check event from PCH */ |
361 | if ((de_iir & DE_PCH_EVENT) && | 374 | if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) |
362 | (pch_iir & SDE_HOTPLUG_MASK)) { | ||
363 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 375 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
364 | } | ||
365 | 376 | ||
366 | if (de_iir & DE_PCU_EVENT) { | 377 | if (de_iir & DE_PCU_EVENT) { |
367 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 378 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
@@ -604,9 +615,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
604 | batchbuffer[0] = NULL; | 615 | batchbuffer[0] = NULL; |
605 | batchbuffer[1] = NULL; | 616 | batchbuffer[1] = NULL; |
606 | count = 0; | 617 | count = 0; |
607 | list_for_each_entry(obj_priv, | 618 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { |
608 | &dev_priv->render_ring.active_list, list) { | ||
609 | |||
610 | struct drm_gem_object *obj = &obj_priv->base; | 619 | struct drm_gem_object *obj = &obj_priv->base; |
611 | 620 | ||
612 | if (batchbuffer[0] == NULL && | 621 | if (batchbuffer[0] == NULL && |
@@ -623,7 +632,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
623 | } | 632 | } |
624 | /* Scan the other lists for completeness for those bizarre errors. */ | 633 | /* Scan the other lists for completeness for those bizarre errors. */ |
625 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | 634 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { |
626 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | 635 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { |
627 | struct drm_gem_object *obj = &obj_priv->base; | 636 | struct drm_gem_object *obj = &obj_priv->base; |
628 | 637 | ||
629 | if (batchbuffer[0] == NULL && | 638 | if (batchbuffer[0] == NULL && |
@@ -641,7 +650,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
641 | } | 650 | } |
642 | } | 651 | } |
643 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | 652 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { |
644 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | 653 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { |
645 | struct drm_gem_object *obj = &obj_priv->base; | 654 | struct drm_gem_object *obj = &obj_priv->base; |
646 | 655 | ||
647 | if (batchbuffer[0] == NULL && | 656 | if (batchbuffer[0] == NULL && |
@@ -660,7 +669,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
660 | } | 669 | } |
661 | 670 | ||
662 | /* We need to copy these to an anonymous buffer as the simplest | 671 | /* We need to copy these to an anonymous buffer as the simplest |
663 | * method to avoid being overwritten by userpace. | 672 | * method to avoid being overwritten by userspace. |
664 | */ | 673 | */ |
665 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | 674 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); |
666 | if (batchbuffer[1] != batchbuffer[0]) | 675 | if (batchbuffer[1] != batchbuffer[0]) |
@@ -682,8 +691,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
682 | 691 | ||
683 | if (error->active_bo) { | 692 | if (error->active_bo) { |
684 | int i = 0; | 693 | int i = 0; |
685 | list_for_each_entry(obj_priv, | 694 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { |
686 | &dev_priv->render_ring.active_list, list) { | ||
687 | struct drm_gem_object *obj = &obj_priv->base; | 695 | struct drm_gem_object *obj = &obj_priv->base; |
688 | 696 | ||
689 | error->active_bo[i].size = obj->size; | 697 | error->active_bo[i].size = obj->size; |
@@ -880,6 +888,8 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
880 | wake_up_all(&dev_priv->render_ring.irq_queue); | 888 | wake_up_all(&dev_priv->render_ring.irq_queue); |
881 | if (HAS_BSD(dev)) | 889 | if (HAS_BSD(dev)) |
882 | wake_up_all(&dev_priv->bsd_ring.irq_queue); | 890 | wake_up_all(&dev_priv->bsd_ring.irq_queue); |
891 | if (HAS_BLT(dev)) | ||
892 | wake_up_all(&dev_priv->blt_ring.irq_queue); | ||
883 | } | 893 | } |
884 | 894 | ||
885 | queue_work(dev_priv->wq, &dev_priv->error_work); | 895 | queue_work(dev_priv->wq, &dev_priv->error_work); |
@@ -940,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
940 | unsigned long irqflags; | 950 | unsigned long irqflags; |
941 | int irq_received; | 951 | int irq_received; |
942 | int ret = IRQ_NONE; | 952 | int ret = IRQ_NONE; |
943 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
944 | 953 | ||
945 | atomic_inc(&dev_priv->irq_received); | 954 | atomic_inc(&dev_priv->irq_received); |
946 | 955 | ||
@@ -1017,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1017 | READ_BREADCRUMB(dev_priv); | 1026 | READ_BREADCRUMB(dev_priv); |
1018 | } | 1027 | } |
1019 | 1028 | ||
1020 | if (iir & I915_USER_INTERRUPT) { | 1029 | if (iir & I915_USER_INTERRUPT) |
1021 | u32 seqno = render_ring->get_seqno(dev, render_ring); | 1030 | notify_ring(dev, &dev_priv->render_ring); |
1022 | render_ring->irq_gem_seqno = seqno; | ||
1023 | trace_i915_gem_request_complete(dev, seqno); | ||
1024 | wake_up_all(&dev_priv->render_ring.irq_queue); | ||
1025 | dev_priv->hangcheck_count = 0; | ||
1026 | mod_timer(&dev_priv->hangcheck_timer, | ||
1027 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
1028 | } | ||
1029 | |||
1030 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) | 1031 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) |
1031 | wake_up_all(&dev_priv->bsd_ring.irq_queue); | 1032 | notify_ring(dev, &dev_priv->bsd_ring); |
1032 | 1033 | ||
1033 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | 1034 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
1034 | intel_prepare_page_flip(dev, 0); | 1035 | intel_prepare_page_flip(dev, 0); |
@@ -1357,6 +1358,12 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1357 | missed_wakeup = true; | 1358 | missed_wakeup = true; |
1358 | } | 1359 | } |
1359 | 1360 | ||
1361 | if (dev_priv->blt_ring.waiting_gem_seqno && | ||
1362 | waitqueue_active(&dev_priv->blt_ring.irq_queue)) { | ||
1363 | wake_up_all(&dev_priv->blt_ring.irq_queue); | ||
1364 | missed_wakeup = true; | ||
1365 | } | ||
1366 | |||
1360 | if (missed_wakeup) | 1367 | if (missed_wakeup) |
1361 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | 1368 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); |
1362 | return; | 1369 | return; |
@@ -1431,8 +1438,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1431 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1438 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1432 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1439 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1433 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; | 1440 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; |
1434 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1441 | u32 hotplug_mask; |
1435 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | ||
1436 | 1442 | ||
1437 | dev_priv->irq_mask_reg = ~display_mask; | 1443 | dev_priv->irq_mask_reg = ~display_mask; |
1438 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; | 1444 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; |
@@ -1443,8 +1449,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1443 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); | 1449 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); |
1444 | (void) I915_READ(DEIER); | 1450 | (void) I915_READ(DEIER); |
1445 | 1451 | ||
1446 | if (IS_GEN6(dev)) | 1452 | if (IS_GEN6(dev)) { |
1447 | render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT; | 1453 | render_mask = |
1454 | GT_PIPE_NOTIFY | | ||
1455 | GT_GEN6_BSD_USER_INTERRUPT | | ||
1456 | GT_BLT_USER_INTERRUPT; | ||
1457 | } | ||
1448 | 1458 | ||
1449 | dev_priv->gt_irq_mask_reg = ~render_mask; | 1459 | dev_priv->gt_irq_mask_reg = ~render_mask; |
1450 | dev_priv->gt_irq_enable_reg = render_mask; | 1460 | dev_priv->gt_irq_enable_reg = render_mask; |
@@ -1454,11 +1464,20 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1454 | if (IS_GEN6(dev)) { | 1464 | if (IS_GEN6(dev)) { |
1455 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); | 1465 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); |
1456 | I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); | 1466 | I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); |
1467 | I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1457 | } | 1468 | } |
1458 | 1469 | ||
1459 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1470 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); |
1460 | (void) I915_READ(GTIER); | 1471 | (void) I915_READ(GTIER); |
1461 | 1472 | ||
1473 | if (HAS_PCH_CPT(dev)) { | ||
1474 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | | ||
1475 | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT; | ||
1476 | } else { | ||
1477 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | ||
1478 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | ||
1479 | } | ||
1480 | |||
1462 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | 1481 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; |
1463 | dev_priv->pch_irq_enable_reg = hotplug_mask; | 1482 | dev_priv->pch_irq_enable_reg = hotplug_mask; |
1464 | 1483 | ||
@@ -1515,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1515 | u32 error_mask; | 1534 | u32 error_mask; |
1516 | 1535 | ||
1517 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); | 1536 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); |
1518 | |||
1519 | if (HAS_BSD(dev)) | 1537 | if (HAS_BSD(dev)) |
1520 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); | 1538 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); |
1539 | if (HAS_BLT(dev)) | ||
1540 | DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); | ||
1521 | 1541 | ||
1522 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1542 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1523 | 1543 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d02de212e6a..25ed911a311 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -263,6 +263,7 @@ | |||
263 | #define RENDER_RING_BASE 0x02000 | 263 | #define RENDER_RING_BASE 0x02000 |
264 | #define BSD_RING_BASE 0x04000 | 264 | #define BSD_RING_BASE 0x04000 |
265 | #define GEN6_BSD_RING_BASE 0x12000 | 265 | #define GEN6_BSD_RING_BASE 0x12000 |
266 | #define BLT_RING_BASE 0x22000 | ||
266 | #define RING_TAIL(base) ((base)+0x30) | 267 | #define RING_TAIL(base) ((base)+0x30) |
267 | #define RING_HEAD(base) ((base)+0x34) | 268 | #define RING_HEAD(base) ((base)+0x34) |
268 | #define RING_START(base) ((base)+0x38) | 269 | #define RING_START(base) ((base)+0x38) |
@@ -661,13 +662,6 @@ | |||
661 | #define LVDS 0x61180 | 662 | #define LVDS 0x61180 |
662 | #define LVDS_ON (1<<31) | 663 | #define LVDS_ON (1<<31) |
663 | 664 | ||
664 | #define ADPA 0x61100 | ||
665 | #define ADPA_DPMS_MASK (~(3<<10)) | ||
666 | #define ADPA_DPMS_ON (0<<10) | ||
667 | #define ADPA_DPMS_SUSPEND (1<<10) | ||
668 | #define ADPA_DPMS_STANDBY (2<<10) | ||
669 | #define ADPA_DPMS_OFF (3<<10) | ||
670 | |||
671 | /* Scratch pad debug 0 reg: | 665 | /* Scratch pad debug 0 reg: |
672 | */ | 666 | */ |
673 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 | 667 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 |
@@ -1200,6 +1194,7 @@ | |||
1200 | #define ADPA_DPMS_STANDBY (2<<10) | 1194 | #define ADPA_DPMS_STANDBY (2<<10) |
1201 | #define ADPA_DPMS_OFF (3<<10) | 1195 | #define ADPA_DPMS_OFF (3<<10) |
1202 | 1196 | ||
1197 | |||
1203 | /* Hotplug control (945+ only) */ | 1198 | /* Hotplug control (945+ only) */ |
1204 | #define PORT_HOTPLUG_EN 0x61110 | 1199 | #define PORT_HOTPLUG_EN 0x61110 |
1205 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) | 1200 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) |
@@ -1358,6 +1353,22 @@ | |||
1358 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 1353 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
1359 | #define LVDS_B0B3_POWER_UP (3 << 2) | 1354 | #define LVDS_B0B3_POWER_UP (3 << 2) |
1360 | 1355 | ||
1356 | /* Video Data Island Packet control */ | ||
1357 | #define VIDEO_DIP_DATA 0x61178 | ||
1358 | #define VIDEO_DIP_CTL 0x61170 | ||
1359 | #define VIDEO_DIP_ENABLE (1 << 31) | ||
1360 | #define VIDEO_DIP_PORT_B (1 << 29) | ||
1361 | #define VIDEO_DIP_PORT_C (2 << 29) | ||
1362 | #define VIDEO_DIP_ENABLE_AVI (1 << 21) | ||
1363 | #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) | ||
1364 | #define VIDEO_DIP_ENABLE_SPD (8 << 21) | ||
1365 | #define VIDEO_DIP_SELECT_AVI (0 << 19) | ||
1366 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) | ||
1367 | #define VIDEO_DIP_SELECT_SPD (3 << 19) | ||
1368 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) | ||
1369 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) | ||
1370 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) | ||
1371 | |||
1361 | /* Panel power sequencing */ | 1372 | /* Panel power sequencing */ |
1362 | #define PP_STATUS 0x61200 | 1373 | #define PP_STATUS 0x61200 |
1363 | #define PP_ON (1 << 31) | 1374 | #define PP_ON (1 << 31) |
@@ -1373,6 +1384,9 @@ | |||
1373 | #define PP_SEQUENCE_ON (1 << 28) | 1384 | #define PP_SEQUENCE_ON (1 << 28) |
1374 | #define PP_SEQUENCE_OFF (2 << 28) | 1385 | #define PP_SEQUENCE_OFF (2 << 28) |
1375 | #define PP_SEQUENCE_MASK 0x30000000 | 1386 | #define PP_SEQUENCE_MASK 0x30000000 |
1387 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) | ||
1388 | #define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) | ||
1389 | #define PP_SEQUENCE_STATE_MASK 0x0000000f | ||
1376 | #define PP_CONTROL 0x61204 | 1390 | #define PP_CONTROL 0x61204 |
1377 | #define POWER_TARGET_ON (1 << 0) | 1391 | #define POWER_TARGET_ON (1 << 0) |
1378 | #define PP_ON_DELAYS 0x61208 | 1392 | #define PP_ON_DELAYS 0x61208 |
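The VIDEO_DIP_* additions a few hunks up are the register plumbing behind the AVI-infoframe support in this merge. One plausible composition of a control value, enabling the DIP on port B, selecting the AVI packet, and resending it every vsync, using the definitions from that hunk (how intel_hdmi.c actually programs VIDEO_DIP_CTL is not shown here):

#include <stdint.h>
#include <stdio.h>

#define VIDEO_DIP_ENABLE      (1u << 31)
#define VIDEO_DIP_PORT_B      (1u << 29)
#define VIDEO_DIP_ENABLE_AVI  (1u << 21)
#define VIDEO_DIP_SELECT_AVI  (0u << 19)
#define VIDEO_DIP_FREQ_VSYNC  (1u << 16)

int main(void)
{
        uint32_t ctl = VIDEO_DIP_ENABLE | VIDEO_DIP_PORT_B |
                       VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_SELECT_AVI |
                       VIDEO_DIP_FREQ_VSYNC;

        printf("VIDEO_DIP_CTL = 0x%08x\n", ctl);
        return 0;
}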
@@ -2564,6 +2578,7 @@ | |||
2564 | #define GT_USER_INTERRUPT (1 << 0) | 2578 | #define GT_USER_INTERRUPT (1 << 0) |
2565 | #define GT_BSD_USER_INTERRUPT (1 << 5) | 2579 | #define GT_BSD_USER_INTERRUPT (1 << 5) |
2566 | #define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) | 2580 | #define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) |
2581 | #define GT_BLT_USER_INTERRUPT (1 << 22) | ||
2567 | 2582 | ||
2568 | #define GTISR 0x44010 | 2583 | #define GTISR 0x44010 |
2569 | #define GTIMR 0x44014 | 2584 | #define GTIMR 0x44014 |
@@ -2598,6 +2613,10 @@ | |||
2598 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 2613 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
2599 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 2614 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
2600 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 2615 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
2616 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ | ||
2617 | SDE_PORTD_HOTPLUG_CPT | \ | ||
2618 | SDE_PORTC_HOTPLUG_CPT | \ | ||
2619 | SDE_PORTB_HOTPLUG_CPT) | ||
2601 | 2620 | ||
2602 | #define SDEISR 0xc4000 | 2621 | #define SDEISR 0xc4000 |
2603 | #define SDEIMR 0xc4004 | 2622 | #define SDEIMR 0xc4004 |
@@ -2779,6 +2798,10 @@ | |||
2779 | #define FDI_RXA_CHICKEN 0xc200c | 2798 | #define FDI_RXA_CHICKEN 0xc200c |
2780 | #define FDI_RXB_CHICKEN 0xc2010 | 2799 | #define FDI_RXB_CHICKEN 0xc2010 |
2781 | #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) | 2800 | #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) |
2801 | #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) | ||
2802 | |||
2803 | #define SOUTH_DSPCLK_GATE_D 0xc2020 | ||
2804 | #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) | ||
2782 | 2805 | ||
2783 | /* CPU: FDI_TX */ | 2806 | /* CPU: FDI_TX */ |
2784 | #define FDI_TXA_CTL 0x60100 | 2807 | #define FDI_TXA_CTL 0x60100 |
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c new file mode 100644 index 00000000000..65c88f9ba12 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * Intel ACPI functions | ||
3 | * | ||
4 | * _DSM related code stolen from nouveau_acpi.c. | ||
5 | */ | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/acpi.h> | ||
8 | #include <linux/vga_switcheroo.h> | ||
9 | #include <acpi/acpi_drivers.h> | ||
10 | |||
11 | #include "drmP.h" | ||
12 | |||
13 | #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ | ||
14 | |||
15 | #define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ | ||
16 | #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ | ||
17 | |||
18 | static struct intel_dsm_priv { | ||
19 | acpi_handle dhandle; | ||
20 | } intel_dsm_priv; | ||
21 | |||
22 | static const u8 intel_dsm_guid[] = { | ||
23 | 0xd3, 0x73, 0xd8, 0x7e, | ||
24 | 0xd0, 0xc2, | ||
25 | 0x4f, 0x4e, | ||
26 | 0xa8, 0x54, | ||
27 | 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c | ||
28 | }; | ||
29 | |||
30 | static int intel_dsm(acpi_handle handle, int func, int arg) | ||
31 | { | ||
32 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
33 | struct acpi_object_list input; | ||
34 | union acpi_object params[4]; | ||
35 | union acpi_object *obj; | ||
36 | u32 result; | ||
37 | int ret = 0; | ||
38 | |||
39 | input.count = 4; | ||
40 | input.pointer = params; | ||
41 | params[0].type = ACPI_TYPE_BUFFER; | ||
42 | params[0].buffer.length = sizeof(intel_dsm_guid); | ||
43 | params[0].buffer.pointer = (char *)intel_dsm_guid; | ||
44 | params[1].type = ACPI_TYPE_INTEGER; | ||
45 | params[1].integer.value = INTEL_DSM_REVISION_ID; | ||
46 | params[2].type = ACPI_TYPE_INTEGER; | ||
47 | params[2].integer.value = func; | ||
48 | params[3].type = ACPI_TYPE_INTEGER; | ||
49 | params[3].integer.value = arg; | ||
50 | |||
51 | ret = acpi_evaluate_object(handle, "_DSM", &input, &output); | ||
52 | if (ret) { | ||
53 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | ||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | obj = (union acpi_object *)output.pointer; | ||
58 | |||
59 | result = 0; | ||
60 | switch (obj->type) { | ||
61 | case ACPI_TYPE_INTEGER: | ||
62 | result = obj->integer.value; | ||
63 | break; | ||
64 | |||
65 | case ACPI_TYPE_BUFFER: | ||
66 | if (obj->buffer.length == 4) { | ||
67 | result = (obj->buffer.pointer[0] | | ||
68 | (obj->buffer.pointer[1] << 8) | | ||
69 | (obj->buffer.pointer[2] << 16) | | ||
70 | (obj->buffer.pointer[3] << 24)); | ||
71 | break; | ||
72 | } | ||
73 | default: | ||
74 | ret = -EINVAL; | ||
75 | break; | ||
76 | } | ||
77 | if (result == 0x80000002) | ||
78 | ret = -ENODEV; | ||
79 | |||
80 | kfree(output.pointer); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
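The ACPI_TYPE_BUFFER branch above assembles a little-endian u32 by hand before intel_dsm() checks for the firmware's 0x80000002 "unsupported" answer. The decode in isolation (standalone, illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_decode(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        const uint8_t buf[4] = { 0x02, 0x00, 0x00, 0x80 };

        /* 0x80000002 is the "function not supported" answer, which
         * intel_dsm() maps to -ENODEV. */
        printf("0x%08x\n", le32_decode(buf));
        return 0;
}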
84 | static char *intel_dsm_port_name(u8 id) | ||
85 | { | ||
86 | switch (id) { | ||
87 | case 0: | ||
88 | return "Reserved"; | ||
89 | case 1: | ||
90 | return "Analog VGA"; | ||
91 | case 2: | ||
92 | return "LVDS"; | ||
93 | case 3: | ||
94 | return "Reserved"; | ||
95 | case 4: | ||
96 | return "HDMI/DVI_B"; | ||
97 | case 5: | ||
98 | return "HDMI/DVI_C"; | ||
99 | case 6: | ||
100 | return "HDMI/DVI_D"; | ||
101 | case 7: | ||
102 | return "DisplayPort_A"; | ||
103 | case 8: | ||
104 | return "DisplayPort_B"; | ||
105 | case 9: | ||
106 | return "DisplayPort_C"; | ||
107 | case 0xa: | ||
108 | return "DisplayPort_D"; | ||
109 | case 0xb: | ||
110 | case 0xc: | ||
111 | case 0xd: | ||
112 | return "Reserved"; | ||
113 | case 0xe: | ||
114 | return "WiDi"; | ||
115 | default: | ||
116 | return "bad type"; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static char *intel_dsm_mux_type(u8 type) | ||
121 | { | ||
122 | switch (type) { | ||
123 | case 0: | ||
124 | return "unknown"; | ||
125 | case 1: | ||
126 | return "No MUX, iGPU only"; | ||
127 | case 2: | ||
128 | return "No MUX, dGPU only"; | ||
129 | case 3: | ||
130 | return "MUXed between iGPU and dGPU"; | ||
131 | default: | ||
132 | return "bad type"; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | static void intel_dsm_platform_mux_info(void) | ||
137 | { | ||
138 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
139 | struct acpi_object_list input; | ||
140 | union acpi_object params[4]; | ||
141 | union acpi_object *pkg; | ||
142 | int i, ret; | ||
143 | |||
144 | input.count = 4; | ||
145 | input.pointer = params; | ||
146 | params[0].type = ACPI_TYPE_BUFFER; | ||
147 | params[0].buffer.length = sizeof(intel_dsm_guid); | ||
148 | params[0].buffer.pointer = (char *)intel_dsm_guid; | ||
149 | params[1].type = ACPI_TYPE_INTEGER; | ||
150 | params[1].integer.value = INTEL_DSM_REVISION_ID; | ||
151 | params[2].type = ACPI_TYPE_INTEGER; | ||
152 | params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; | ||
153 | params[3].type = ACPI_TYPE_INTEGER; | ||
154 | params[3].integer.value = 0; | ||
155 | |||
156 | ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, | ||
157 | &output); | ||
158 | if (ret) { | ||
159 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | ||
160 | goto out; | ||
161 | } | ||
162 | |||
163 | pkg = (union acpi_object *)output.pointer; | ||
164 | |||
165 | if (pkg->type == ACPI_TYPE_PACKAGE) { | ||
166 | union acpi_object *connector_count = &pkg->package.elements[0]; | ||
167 | DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", | ||
168 | (unsigned long long)connector_count->integer.value); | ||
169 | for (i = 1; i < pkg->package.count; i++) { | ||
170 | union acpi_object *obj = &pkg->package.elements[i]; | ||
171 | union acpi_object *connector_id = | ||
172 | &obj->package.elements[0]; | ||
173 | union acpi_object *info = &obj->package.elements[1]; | ||
174 | DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", | ||
175 | (unsigned long long)connector_id->integer.value); | ||
176 | DRM_DEBUG_DRIVER(" port id: %s\n", | ||
177 | intel_dsm_port_name(info->buffer.pointer[0])); | ||
178 | DRM_DEBUG_DRIVER(" display mux info: %s\n", | ||
179 | intel_dsm_mux_type(info->buffer.pointer[1])); | ||
180 | DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", | ||
181 | intel_dsm_mux_type(info->buffer.pointer[2])); | ||
182 | DRM_DEBUG_DRIVER(" hpd mux info: %s\n", | ||
183 | intel_dsm_mux_type(info->buffer.pointer[3])); | ||
184 | } | ||
185 | } else { | ||
186 | DRM_ERROR("MUX INFO call failed\n"); | ||
187 | } | ||
188 | |||
189 | out: | ||
190 | kfree(output.pointer); | ||
191 | } | ||
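The loop expects element 0 of the returned package to be a connector count, followed by one sub-package per connector. Flattened into a hypothetical struct purely for readability (ACPI really returns nested acpi_objects):

    /* Sketch: shape of each per-connector sub-package decoded above. */
    struct dsm_mux_entry {
            u64 connector_id;  /* element 0: ACPI Integer */
            u8  port;          /* info buffer byte 0 -> intel_dsm_port_name() */
            u8  display_mux;   /* byte 1 -> intel_dsm_mux_type() */
            u8  aux_dc_mux;    /* byte 2 */
            u8  hpd_mux;       /* byte 3 */
    };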
192 | |||
193 | static int intel_dsm_switchto(enum vga_switcheroo_client_id id) | ||
194 | { | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int intel_dsm_power_state(enum vga_switcheroo_client_id id, | ||
199 | enum vga_switcheroo_state state) | ||
200 | { | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int intel_dsm_init(void) | ||
205 | { | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int intel_dsm_get_client_id(struct pci_dev *pdev) | ||
210 | { | ||
211 | if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) | ||
212 | return VGA_SWITCHEROO_IGD; | ||
213 | else | ||
214 | return VGA_SWITCHEROO_DIS; | ||
215 | } | ||
216 | |||
217 | static struct vga_switcheroo_handler intel_dsm_handler = { | ||
218 | .switchto = intel_dsm_switchto, | ||
219 | .power_state = intel_dsm_power_state, | ||
220 | .init = intel_dsm_init, | ||
221 | .get_client_id = intel_dsm_get_client_id, | ||
222 | }; | ||
223 | |||
224 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) | ||
225 | { | ||
226 | acpi_handle dhandle, intel_handle; | ||
227 | acpi_status status; | ||
228 | int ret; | ||
229 | |||
230 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | ||
231 | if (!dhandle) | ||
232 | return false; | ||
233 | |||
234 | status = acpi_get_handle(dhandle, "_DSM", &intel_handle); | ||
235 | if (ACPI_FAILURE(status)) { | ||
236 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); | ||
237 | return false; | ||
238 | } | ||
239 | |||
240 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); | ||
241 | if (ret < 0) { | ||
242 | DRM_ERROR("failed to get supported _DSM functions\n"); | ||
243 | return false; | ||
244 | } | ||
245 | |||
246 | intel_dsm_priv.dhandle = dhandle; | ||
247 | |||
248 | intel_dsm_platform_mux_info(); | ||
249 | return true; | ||
250 | } | ||
251 | |||
252 | static bool intel_dsm_detect(void) | ||
253 | { | ||
254 | char acpi_method_name[255] = { 0 }; | ||
255 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | ||
256 | struct pci_dev *pdev = NULL; | ||
257 | bool has_dsm = false; | ||
258 | int vga_count = 0; | ||
259 | |||
260 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | ||
261 | vga_count++; | ||
262 | has_dsm |= intel_dsm_pci_probe(pdev); | ||
263 | } | ||
264 | |||
265 | if (vga_count == 2 && has_dsm) { | ||
266 | acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); | ||
267 | DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", | ||
268 | acpi_method_name); | ||
269 | return true; | ||
270 | } | ||
271 | |||
272 | return false; | ||
273 | } | ||
274 | |||
275 | void intel_register_dsm_handler(void) | ||
276 | { | ||
277 | if (!intel_dsm_detect()) | ||
278 | return; | ||
279 | |||
280 | vga_switcheroo_register_handler(&intel_dsm_handler); | ||
281 | } | ||
282 | |||
283 | void intel_unregister_dsm_handler(void) | ||
284 | { | ||
285 | vga_switcheroo_unregister_handler(); | ||
286 | } | ||
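A minimal sketch of the intended call sites (function names here are illustrative; the real teardown hook-up lands in intel_modeset_cleanup() below). Registration is a no-op unless intel_dsm_detect() finds two VGA-class devices and a working _DSM:

    /* Sketch: wire the handler into driver bring-up and teardown. */
    static void example_modeset_init(struct drm_device *dev)
    {
            intel_register_dsm_handler(); /* no-op without a switcheroo _DSM */
            /* ... remaining modeset init ... */
    }

    static void example_modeset_cleanup(struct drm_device *dev)
    {
            intel_unregister_dsm_handler();
            /* ... remaining teardown ... */
    }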
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b1f73ac0f3f..b0b1200ed65 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -24,6 +24,7 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | #include <drm/drm_dp_helper.h> | ||
27 | #include "drmP.h" | 28 | #include "drmP.h" |
28 | #include "drm.h" | 29 | #include "drm.h" |
29 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
@@ -264,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
264 | dev_priv->lvds_use_ssc = general->enable_ssc; | 265 | dev_priv->lvds_use_ssc = general->enable_ssc; |
265 | 266 | ||
266 | if (dev_priv->lvds_use_ssc) { | 267 | if (dev_priv->lvds_use_ssc) { |
267 | if (IS_I85X(dev_priv->dev)) | 268 | if (IS_I85X(dev)) |
268 | dev_priv->lvds_ssc_freq = | 269 | dev_priv->lvds_ssc_freq = |
269 | general->ssc_freq ? 66 : 48; | 270 | general->ssc_freq ? 66 : 48; |
270 | else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) | 271 | else if (IS_GEN5(dev) || IS_GEN6(dev)) |
271 | dev_priv->lvds_ssc_freq = | 272 | dev_priv->lvds_ssc_freq = |
272 | general->ssc_freq ? 100 : 120; | 273 | general->ssc_freq ? 100 : 120; |
273 | else | 274 | else |
@@ -413,6 +414,8 @@ static void | |||
413 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | 414 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) |
414 | { | 415 | { |
415 | struct bdb_edp *edp; | 416 | struct bdb_edp *edp; |
417 | struct edp_power_seq *edp_pps; | ||
418 | struct edp_link_params *edp_link_params; | ||
416 | 419 | ||
417 | edp = find_section(bdb, BDB_EDP); | 420 | edp = find_section(bdb, BDB_EDP); |
418 | if (!edp) { | 421 | if (!edp) { |
@@ -437,19 +440,54 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
437 | break; | 440 | break; |
438 | } | 441 | } |
439 | 442 | ||
440 | dev_priv->edp.rate = edp->link_params[panel_type].rate; | 443 | /* Get the eDP sequencing and link info */ |
441 | dev_priv->edp.lanes = edp->link_params[panel_type].lanes; | 444 | edp_pps = &edp->power_seqs[panel_type]; |
442 | dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis; | 445 | edp_link_params = &edp->link_params[panel_type]; |
443 | dev_priv->edp.vswing = edp->link_params[panel_type].vswing; | ||
444 | 446 | ||
445 | DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n", | 447 | dev_priv->edp.pps = *edp_pps; |
446 | dev_priv->edp.bpp, | ||
447 | dev_priv->edp.rate, | ||
448 | dev_priv->edp.lanes, | ||
449 | dev_priv->edp.preemphasis, | ||
450 | dev_priv->edp.vswing); | ||
451 | 448 | ||
452 | dev_priv->edp.initialized = true; | 449 | dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : |
450 | DP_LINK_BW_1_62; | ||
451 | switch (edp_link_params->lanes) { | ||
452 | case 0: | ||
453 | dev_priv->edp.lanes = 1; | ||
454 | break; | ||
455 | case 1: | ||
456 | dev_priv->edp.lanes = 2; | ||
457 | break; | ||
458 | case 3: | ||
459 | default: | ||
460 | dev_priv->edp.lanes = 4; | ||
461 | break; | ||
462 | } | ||
463 | switch (edp_link_params->preemphasis) { | ||
464 | case 0: | ||
465 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; | ||
466 | break; | ||
467 | case 1: | ||
468 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; | ||
469 | break; | ||
470 | case 2: | ||
471 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; | ||
472 | break; | ||
473 | case 3: | ||
474 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; | ||
475 | break; | ||
476 | } | ||
477 | switch (edp_link_params->vswing) { | ||
478 | case 0: | ||
479 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; | ||
480 | break; | ||
481 | case 1: | ||
482 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; | ||
483 | break; | ||
484 | case 2: | ||
485 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; | ||
486 | break; | ||
487 | case 3: | ||
488 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; | ||
489 | break; | ||
490 | } | ||
453 | } | 491 | } |
454 | 492 | ||
455 | static void | 493 | static void |
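Each switch ladder above is a straight lookup from a small VBT field to a drm_dp_helper.h constant. The same mapping in table form, a sketch assuming the VBT encodings stay within 0-3 (a lanes value of 2 is unused and falls through to the 4-lane default):

    /* Sketch: table form of the VBT -> DP training constant decode. */
    static const int vbt_edp_lanes[4] = { 1, 2, 4 /* reserved */, 4 };
    static const int vbt_edp_preemph[4] = {
            DP_TRAIN_PRE_EMPHASIS_0, DP_TRAIN_PRE_EMPHASIS_3_5,
            DP_TRAIN_PRE_EMPHASIS_6, DP_TRAIN_PRE_EMPHASIS_9_5,
    };
    static const int vbt_edp_vswing[4] = {
            DP_TRAIN_VOLTAGE_SWING_400, DP_TRAIN_VOLTAGE_SWING_600,
            DP_TRAIN_VOLTAGE_SWING_800, DP_TRAIN_VOLTAGE_SWING_1200,
    };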
@@ -539,7 +577,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
539 | } | 577 | } |
540 | 578 | ||
541 | /** | 579 | /** |
542 | * intel_init_bios - initialize VBIOS settings & find VBT | 580 | * intel_parse_bios - find VBT and initialize settings from the BIOS |
543 | * @dev: DRM device | 581 | * @dev: DRM device |
544 | * | 582 | * |
545 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers | 583 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers |
@@ -548,7 +586,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
548 | * Returns 0 on success, nonzero on failure. | 586 | * Returns 0 on success, nonzero on failure. |
549 | */ | 587 | */ |
550 | bool | 588 | bool |
551 | intel_init_bios(struct drm_device *dev) | 589 | intel_parse_bios(struct drm_device *dev) |
552 | { | 590 | { |
553 | struct drm_i915_private *dev_priv = dev->dev_private; | 591 | struct drm_i915_private *dev_priv = dev->dev_private; |
554 | struct pci_dev *pdev = dev->pdev; | 592 | struct pci_dev *pdev = dev->pdev; |
@@ -609,3 +647,20 @@ intel_init_bios(struct drm_device *dev) | |||
609 | 647 | ||
610 | return 0; | 648 | return 0; |
611 | } | 649 | } |
650 | |||
651 | /* Ensure that vital registers have been initialised, even if the BIOS | ||
652 | * is absent or just failing to do its job. | ||
653 | */ | ||
654 | void intel_setup_bios(struct drm_device *dev) | ||
655 | { | ||
656 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
657 | |||
658 | /* Set the Panel Power On/Off timings if uninitialized. */ | ||
659 | if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { | ||
660 | /* Set T2 to 40ms and T5 to 200ms */ | ||
661 | I915_WRITE(PP_ON_DELAYS, 0x019007d0); | ||
662 | |||
663 | /* Set T3 to 35ms and Tx to 200ms */ | ||
664 | I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); | ||
665 | } | ||
666 | } | ||
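The two magic numbers pack the delays in 100us units: the high half of 0x019007d0 is 0x0190 = 400 (40ms, T2) and the low half is 0x07d0 = 2000 (200ms, T5); likewise 0x015e = 350 gives the 35ms T3. A sketch, assuming the high field occupies the upper 16 bits and the low field the lower:

    /* Sketch: compose a PP delay value from milliseconds (100us units).
     * pp_delays(40, 200) == 0x019007d0, pp_delays(35, 200) == 0x015e07d0. */
    static inline u32 pp_delays(unsigned int hi_ms, unsigned int lo_ms)
    {
            return ((hi_ms * 10) << 16) | (lo_ms * 10);
    }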
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index e1a598f2a96..5f8e4edcbbb 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -467,7 +467,8 @@ struct bdb_edp { | |||
467 | struct edp_link_params link_params[16]; | 467 | struct edp_link_params link_params[16]; |
468 | } __attribute__ ((packed)); | 468 | } __attribute__ ((packed)); |
469 | 469 | ||
470 | bool intel_init_bios(struct drm_device *dev); | 470 | void intel_setup_bios(struct drm_device *dev); |
471 | bool intel_parse_bios(struct drm_device *dev); | ||
471 | 472 | ||
472 | /* | 473 | /* |
473 | * Driver<->VBIOS interaction occurs through scratch bits in | 474 | * Driver<->VBIOS interaction occurs through scratch bits in |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 389fcd2aea1..c55c7704335 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -191,7 +191,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); | 191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
192 | 192 | ||
193 | if (turn_off_dac) { | 193 | if (turn_off_dac) { |
194 | I915_WRITE(PCH_ADPA, temp); | 194 | /* Make sure hotplug is enabled */ |
195 | I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); | ||
195 | (void)I915_READ(PCH_ADPA); | 196 | (void)I915_READ(PCH_ADPA); |
196 | } | 197 | } |
197 | 198 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 96d08a9f3aa..990f065374b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -345,8 +345,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, | |||
345 | static inline u32 /* units of 100MHz */ | 345 | static inline u32 /* units of 100MHz */ |
346 | intel_fdi_link_freq(struct drm_device *dev) | 346 | intel_fdi_link_freq(struct drm_device *dev) |
347 | { | 347 | { |
348 | struct drm_i915_private *dev_priv = dev->dev_private; | 348 | if (IS_GEN5(dev)) { |
349 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; | 349 | struct drm_i915_private *dev_priv = dev->dev_private; |
350 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; | ||
351 | } else | ||
352 | return 27; | ||
350 | } | 353 | } |
351 | 354 | ||
352 | static const intel_limit_t intel_limits_i8xx_dvo = { | 355 | static const intel_limit_t intel_limits_i8xx_dvo = { |
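In units of 100MHz, Ironlake reads the FDI link frequency back from the BIOS-programmed PLL (the register field plus 2), while later PCH platforms return a fixed 27, i.e. a 2.7GHz link; a BIOS-programmed field of 25 yields the same 25 + 2 = 27.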
@@ -932,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
932 | struct drm_device *dev = crtc->dev; | 935 | struct drm_device *dev = crtc->dev; |
933 | intel_clock_t clock; | 936 | intel_clock_t clock; |
934 | 937 | ||
935 | /* return directly when it is eDP */ | ||
936 | if (HAS_eDP) | ||
937 | return true; | ||
938 | |||
939 | if (target < 200000) { | 938 | if (target < 200000) { |
940 | clock.n = 1; | 939 | clock.n = 1; |
941 | clock.p1 = 2; | 940 | clock.p1 = 2; |
@@ -1719,6 +1718,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1719 | POSTING_READ(reg); | 1718 | POSTING_READ(reg); |
1720 | udelay(150); | 1719 | udelay(150); |
1721 | 1720 | ||
1721 | /* Ironlake workaround, enable clock pointer after FDI enable */ | ||
1722 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); | ||
1723 | |||
1722 | reg = FDI_RX_IIR(pipe); | 1724 | reg = FDI_RX_IIR(pipe); |
1723 | for (tries = 0; tries < 5; tries++) { | 1725 | for (tries = 0; tries < 5; tries++) { |
1724 | temp = I915_READ(reg); | 1726 | temp = I915_READ(reg); |
@@ -1764,6 +1766,28 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1764 | DRM_ERROR("FDI train 2 fail!\n"); | 1766 | DRM_ERROR("FDI train 2 fail!\n"); |
1765 | 1767 | ||
1766 | DRM_DEBUG_KMS("FDI train done\n"); | 1768 | DRM_DEBUG_KMS("FDI train done\n"); |
1769 | |||
1770 | /* enable normal train */ | ||
1771 | reg = FDI_TX_CTL(pipe); | ||
1772 | temp = I915_READ(reg); | ||
1773 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1774 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
1775 | I915_WRITE(reg, temp); | ||
1776 | |||
1777 | reg = FDI_RX_CTL(pipe); | ||
1778 | temp = I915_READ(reg); | ||
1779 | if (HAS_PCH_CPT(dev)) { | ||
1780 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1781 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1782 | } else { | ||
1783 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1784 | temp |= FDI_LINK_TRAIN_NONE; | ||
1785 | } | ||
1786 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1787 | |||
1788 | /* wait one idle pattern time */ | ||
1789 | POSTING_READ(reg); | ||
1790 | udelay(1000); | ||
1767 | } | 1791 | } |
1768 | 1792 | ||
1769 | static const int snb_b_fdi_train_param[] = { | 1793 |
@@ -2002,8 +2026,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2002 | 2026 | ||
2003 | /* Enable panel fitting for LVDS */ | 2027 | /* Enable panel fitting for LVDS */ |
2004 | if (dev_priv->pch_pf_size && | 2028 | if (dev_priv->pch_pf_size && |
2005 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | 2029 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { |
2006 | || HAS_eDP || intel_pch_has_edp(crtc))) { | ||
2007 | /* Force use of hard-coded filter coefficients | 2030 | /* Force use of hard-coded filter coefficients |
2008 | * as some pre-programmed values are broken, | 2031 | * as some pre-programmed values are broken, |
2009 | * e.g. x201. | 2032 | * e.g. x201. |
@@ -2022,7 +2045,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2022 | if ((temp & PIPECONF_ENABLE) == 0) { | 2045 | if ((temp & PIPECONF_ENABLE) == 0) { |
2023 | I915_WRITE(reg, temp | PIPECONF_ENABLE); | 2046 | I915_WRITE(reg, temp | PIPECONF_ENABLE); |
2024 | POSTING_READ(reg); | 2047 | POSTING_READ(reg); |
2025 | udelay(100); | 2048 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2026 | } | 2049 | } |
2027 | 2050 | ||
2028 | /* configure and enable CPU plane */ | 2051 | /* configure and enable CPU plane */ |
@@ -2067,28 +2090,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2067 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | 2090 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2068 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | 2091 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2069 | 2092 | ||
2070 | /* enable normal train */ | ||
2071 | reg = FDI_TX_CTL(pipe); | ||
2072 | temp = I915_READ(reg); | ||
2073 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2074 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
2075 | I915_WRITE(reg, temp); | ||
2076 | |||
2077 | reg = FDI_RX_CTL(pipe); | ||
2078 | temp = I915_READ(reg); | ||
2079 | if (HAS_PCH_CPT(dev)) { | ||
2080 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2081 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
2082 | } else { | ||
2083 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2084 | temp |= FDI_LINK_TRAIN_NONE; | ||
2085 | } | ||
2086 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
2087 | |||
2088 | /* wait one idle pattern time */ | ||
2089 | POSTING_READ(reg); | ||
2090 | udelay(100); | ||
2091 | |||
2092 | /* For PCH DP, enable TRANS_DP_CTL */ | 2093 | /* For PCH DP, enable TRANS_DP_CTL */ |
2093 | if (HAS_PCH_CPT(dev) && | 2094 | if (HAS_PCH_CPT(dev) && |
2094 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 2095 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
@@ -2134,7 +2135,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2134 | temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | 2135 | temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; |
2135 | I915_WRITE(reg, temp | TRANS_ENABLE); | 2136 | I915_WRITE(reg, temp | TRANS_ENABLE); |
2136 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 2137 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
2137 | DRM_ERROR("failed to enable transcoder\n"); | 2138 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
2138 | 2139 | ||
2139 | intel_crtc_load_lut(crtc); | 2140 | intel_crtc_load_lut(crtc); |
2140 | intel_update_fbc(dev); | 2141 | intel_update_fbc(dev); |
@@ -2174,9 +2175,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2174 | temp = I915_READ(reg); | 2175 | temp = I915_READ(reg); |
2175 | if (temp & PIPECONF_ENABLE) { | 2176 | if (temp & PIPECONF_ENABLE) { |
2176 | I915_WRITE(reg, temp & ~PIPECONF_ENABLE); | 2177 | I915_WRITE(reg, temp & ~PIPECONF_ENABLE); |
2178 | POSTING_READ(reg); | ||
2177 | /* wait for cpu pipe off, pipe state */ | 2179 | /* wait for cpu pipe off, pipe state */ |
2178 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50)) | 2180 | intel_wait_for_pipe_off(dev, intel_crtc->pipe); |
2179 | DRM_ERROR("failed to turn off cpu pipe\n"); | ||
2180 | } | 2181 | } |
2181 | 2182 | ||
2182 | /* Disable PF */ | 2183 | /* Disable PF */ |
@@ -2198,6 +2199,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2198 | POSTING_READ(reg); | 2199 | POSTING_READ(reg); |
2199 | udelay(100); | 2200 | udelay(100); |
2200 | 2201 | ||
2202 | /* Ironlake workaround, disable clock pointer after FDI is disabled */ | ||
2203 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2204 | I915_READ(FDI_RX_CHICKEN(pipe)) & | ||
2205 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE); | ||
2206 | |||
2201 | /* still set train pattern 1 */ | 2207 | /* still set train pattern 1 */ |
2202 | reg = FDI_TX_CTL(pipe); | 2208 | reg = FDI_TX_CTL(pipe); |
2203 | temp = I915_READ(reg); | 2209 | temp = I915_READ(reg); |
@@ -3623,7 +3629,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3623 | refclk / 1000); | 3629 | refclk / 1000); |
3624 | } else if (!IS_GEN2(dev)) { | 3630 | } else if (!IS_GEN2(dev)) { |
3625 | refclk = 96000; | 3631 | refclk = 96000; |
3626 | if (HAS_PCH_SPLIT(dev)) | 3632 | if (HAS_PCH_SPLIT(dev) && |
3633 | (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) | ||
3627 | refclk = 120000; /* 120Mhz refclk */ | 3634 | refclk = 120000; /* 120Mhz refclk */ |
3628 | } else { | 3635 | } else { |
3629 | refclk = 48000; | 3636 | refclk = 48000; |
@@ -3685,16 +3692,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3685 | /* FDI link */ | 3692 | /* FDI link */ |
3686 | if (HAS_PCH_SPLIT(dev)) { | 3693 | if (HAS_PCH_SPLIT(dev)) { |
3687 | int lane = 0, link_bw, bpp; | 3694 | int lane = 0, link_bw, bpp; |
3688 | /* eDP doesn't require FDI link, so just set DP M/N | 3695 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
3689 | according to current link config */ | 3696 | according to current link config */ |
3690 | if (has_edp_encoder) { | 3697 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
3691 | target_clock = mode->clock; | 3698 | target_clock = mode->clock; |
3692 | intel_edp_link_config(has_edp_encoder, | 3699 | intel_edp_link_config(has_edp_encoder, |
3693 | &lane, &link_bw); | 3700 | &lane, &link_bw); |
3694 | } else { | 3701 | } else { |
3695 | /* DP over FDI requires target mode clock | 3702 | /* [e]DP over FDI requires target mode clock |
3696 | instead of link clock */ | 3703 | instead of link clock */ |
3697 | if (is_dp) | 3704 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
3698 | target_clock = mode->clock; | 3705 | target_clock = mode->clock; |
3699 | else | 3706 | else |
3700 | target_clock = adjusted_mode->clock; | 3707 | target_clock = adjusted_mode->clock; |
@@ -3718,7 +3725,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3718 | temp |= PIPE_8BPC; | 3725 | temp |= PIPE_8BPC; |
3719 | else | 3726 | else |
3720 | temp |= PIPE_6BPC; | 3727 | temp |= PIPE_6BPC; |
3721 | } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { | 3728 | } else if (has_edp_encoder) { |
3722 | switch (dev_priv->edp.bpp/3) { | 3729 | switch (dev_priv->edp.bpp/3) { |
3723 | case 8: | 3730 | case 8: |
3724 | temp |= PIPE_8BPC; | 3731 | temp |= PIPE_8BPC; |
@@ -3794,13 +3801,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3794 | 3801 | ||
3795 | POSTING_READ(PCH_DREF_CONTROL); | 3802 | POSTING_READ(PCH_DREF_CONTROL); |
3796 | udelay(200); | 3803 | udelay(200); |
3804 | } | ||
3805 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
3797 | 3806 | ||
3798 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 3807 | /* Enable CPU source on CPU attached eDP */ |
3799 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | 3808 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3809 | if (dev_priv->lvds_use_ssc) | ||
3810 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
3811 | else | ||
3812 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
3800 | } else { | 3813 | } else { |
3801 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 3814 | /* Enable SSC on PCH eDP if needed */ |
3815 | if (dev_priv->lvds_use_ssc) { | ||
3816 | DRM_ERROR("enabling SSC on PCH\n"); | ||
3817 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
3818 | } | ||
3802 | } | 3819 | } |
3803 | I915_WRITE(PCH_DREF_CONTROL, temp); | 3820 | I915_WRITE(PCH_DREF_CONTROL, temp); |
3821 | POSTING_READ(PCH_DREF_CONTROL); | ||
3822 | udelay(200); | ||
3804 | } | 3823 | } |
3805 | } | 3824 | } |
3806 | 3825 | ||
@@ -3835,7 +3854,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3835 | } | 3854 | } |
3836 | dpll |= DPLL_DVO_HIGH_SPEED; | 3855 | dpll |= DPLL_DVO_HIGH_SPEED; |
3837 | } | 3856 | } |
3838 | if (is_dp) | 3857 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
3839 | dpll |= DPLL_DVO_HIGH_SPEED; | 3858 | dpll |= DPLL_DVO_HIGH_SPEED; |
3840 | 3859 | ||
3841 | /* compute bitmask from p1 value */ | 3860 | /* compute bitmask from p1 value */ |
@@ -3934,7 +3953,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3934 | dpll_reg = DPLL(pipe); | 3953 | dpll_reg = DPLL(pipe); |
3935 | } | 3954 | } |
3936 | 3955 | ||
3937 | if (!has_edp_encoder) { | 3956 | /* PCH eDP needs FDI, but CPU eDP does not */ |
3957 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
3938 | I915_WRITE(fp_reg, fp); | 3958 | I915_WRITE(fp_reg, fp); |
3939 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 3959 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
3940 | 3960 | ||
@@ -4011,9 +4031,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4011 | } | 4031 | } |
4012 | } | 4032 | } |
4013 | 4033 | ||
4014 | if (is_dp) | 4034 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4015 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 4035 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
4016 | else if (HAS_PCH_SPLIT(dev)) { | 4036 | } else if (HAS_PCH_SPLIT(dev)) { |
4017 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | 4037 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
4018 | if (pipe == 0) { | 4038 | if (pipe == 0) { |
4019 | I915_WRITE(TRANSA_DATA_M1, 0); | 4039 | I915_WRITE(TRANSA_DATA_M1, 0); |
@@ -4028,7 +4048,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4028 | } | 4048 | } |
4029 | } | 4049 | } |
4030 | 4050 | ||
4031 | if (!has_edp_encoder) { | 4051 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4032 | I915_WRITE(fp_reg, fp); | 4052 | I915_WRITE(fp_reg, fp); |
4033 | I915_WRITE(dpll_reg, dpll); | 4053 | I915_WRITE(dpll_reg, dpll); |
4034 | 4054 | ||
@@ -4122,29 +4142,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4122 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); | 4142 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
4123 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); | 4143 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
4124 | 4144 | ||
4125 | if (has_edp_encoder) { | 4145 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4126 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | 4146 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
4127 | } else { | ||
4128 | /* enable FDI RX PLL too */ | ||
4129 | reg = FDI_RX_CTL(pipe); | ||
4130 | temp = I915_READ(reg); | ||
4131 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); | ||
4132 | |||
4133 | POSTING_READ(reg); | ||
4134 | udelay(200); | ||
4135 | |||
4136 | /* enable FDI TX PLL too */ | ||
4137 | reg = FDI_TX_CTL(pipe); | ||
4138 | temp = I915_READ(reg); | ||
4139 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); | ||
4140 | |||
4141 | /* enable FDI RX PCDCLK */ | ||
4142 | reg = FDI_RX_CTL(pipe); | ||
4143 | temp = I915_READ(reg); | ||
4144 | I915_WRITE(reg, temp | FDI_PCDCLK); | ||
4145 | |||
4146 | POSTING_READ(reg); | ||
4147 | udelay(200); | ||
4148 | } | 4147 | } |
4149 | } | 4148 | } |
4150 | 4149 | ||
@@ -4153,7 +4152,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4153 | 4152 | ||
4154 | intel_wait_for_vblank(dev, pipe); | 4153 | intel_wait_for_vblank(dev, pipe); |
4155 | 4154 | ||
4156 | if (IS_IRONLAKE(dev)) { | 4155 | if (IS_GEN5(dev)) { |
4157 | /* enable address swizzle for tiling buffer */ | 4156 | /* enable address swizzle for tiling buffer */ |
4158 | temp = I915_READ(DISP_ARB_CTL); | 4157 | temp = I915_READ(DISP_ARB_CTL); |
4159 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | 4158 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); |
@@ -4992,11 +4991,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4992 | 4991 | ||
4993 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4992 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4994 | 4993 | ||
4995 | obj_priv = to_intel_bo(work->pending_flip_obj); | 4994 | obj_priv = to_intel_bo(work->old_fb_obj); |
4996 | 4995 | atomic_clear_mask(1 << intel_crtc->plane, | |
4997 | /* Initial scanout buffer will have a 0 pending flip count */ | 4996 | &obj_priv->pending_flip.counter); |
4998 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | 4997 | if (atomic_read(&obj_priv->pending_flip) == 0) |
4999 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
5000 | wake_up(&dev_priv->pending_flip_queue); | 4998 | wake_up(&dev_priv->pending_flip_queue); |
5001 | schedule_work(&work->work); | 4999 | schedule_work(&work->work); |
5002 | 5000 | ||
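pending_flip now carries one bit per plane instead of a single counter: queuing a flip sets bit `plane` on the object being flipped away from, completion clears it, and waiters are only woken once the whole mask drains to zero. The pairing as a sketch, assuming at most one outstanding flip per plane:

    /* Sketch: per-plane pending-flip accounting in a single atomic_t. */
    static void queue_flip(atomic_t *pending, int plane)
    {
            atomic_add(1 << plane, pending);        /* mark plane busy */
    }

    static void finish_flip(atomic_t *pending, int plane,
                            wait_queue_head_t *waiters)
    {
            atomic_clear_mask(1 << plane, &pending->counter);
            if (atomic_read(pending) == 0)          /* nothing in flight */
                    wake_up(waiters);
    }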
@@ -5092,9 +5090,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5092 | if (ret) | 5090 | if (ret) |
5093 | goto cleanup_objs; | 5091 | goto cleanup_objs; |
5094 | 5092 | ||
5095 | obj_priv = to_intel_bo(obj); | 5093 | /* Block clients from rendering to the new back buffer until |
5096 | atomic_inc(&obj_priv->pending_flip); | 5094 | * the flip occurs and the object is no longer visible. |
5095 | */ | ||
5096 | atomic_add(1 << intel_crtc->plane, | ||
5097 | &to_intel_bo(work->old_fb_obj)->pending_flip); | ||
5098 | |||
5097 | work->pending_flip_obj = obj; | 5099 | work->pending_flip_obj = obj; |
5100 | obj_priv = to_intel_bo(obj); | ||
5098 | 5101 | ||
5099 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5102 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5100 | u32 flip_mask; | 5103 | u32 flip_mask; |
@@ -5736,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5736 | if (HAS_PCH_SPLIT(dev)) { | 5739 | if (HAS_PCH_SPLIT(dev)) { |
5737 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 5740 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
5738 | 5741 | ||
5739 | if (IS_IRONLAKE(dev)) { | 5742 | if (IS_GEN5(dev)) { |
5740 | /* Required for FBC */ | 5743 | /* Required for FBC */ |
5741 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; | 5744 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; |
5742 | /* Required for CxSR */ | 5745 | /* Required for CxSR */ |
@@ -5750,13 +5753,20 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5750 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 5753 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
5751 | 5754 | ||
5752 | /* | 5755 | /* |
5756 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
5757 | * gating for the panel power sequencer or it will fail to | ||
5758 | * start up when no ports are active. | ||
5759 | */ | ||
5760 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
5761 | |||
5762 | /* | ||
5753 | * According to the spec the following bits should be set in | 5763 | * According to the spec the following bits should be set in |
5754 | * order to enable memory self-refresh | 5764 | * order to enable memory self-refresh |
5755 | * The bit 22/21 of 0x42004 | 5765 | * The bit 22/21 of 0x42004 |
5756 | * The bit 5 of 0x42020 | 5766 | * The bit 5 of 0x42020 |
5757 | * The bit 15 of 0x45000 | 5767 | * The bit 15 of 0x45000 |
5758 | */ | 5768 | */ |
5759 | if (IS_IRONLAKE(dev)) { | 5769 | if (IS_GEN5(dev)) { |
5760 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 5770 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5761 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | 5771 | (I915_READ(ILK_DISPLAY_CHICKEN2) | |
5762 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | 5772 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); |
@@ -5932,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev) | |||
5932 | 5942 | ||
5933 | /* For FIFO watermark updates */ | 5943 | /* For FIFO watermark updates */ |
5934 | if (HAS_PCH_SPLIT(dev)) { | 5944 | if (HAS_PCH_SPLIT(dev)) { |
5935 | if (IS_IRONLAKE(dev)) { | 5945 | if (IS_GEN5(dev)) { |
5936 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) | 5946 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
5937 | dev_priv->display.update_wm = ironlake_update_wm; | 5947 | dev_priv->display.update_wm = ironlake_update_wm; |
5938 | else { | 5948 | else { |
@@ -6131,6 +6141,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6131 | drm_kms_helper_poll_fini(dev); | 6141 | drm_kms_helper_poll_fini(dev); |
6132 | mutex_lock(&dev->struct_mutex); | 6142 | mutex_lock(&dev->struct_mutex); |
6133 | 6143 | ||
6144 | intel_unregister_dsm_handler(); | ||
6145 | |||
6146 | |||
6134 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 6147 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6135 | /* Skip inactive CRTCs */ | 6148 | /* Skip inactive CRTCs */ |
6136 | if (!crtc->fb) | 6149 | if (!crtc->fb) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 152d94507b7..891f4f1d63b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -42,15 +42,13 @@ | |||
42 | 42 | ||
43 | #define DP_LINK_CONFIGURATION_SIZE 9 | 43 | #define DP_LINK_CONFIGURATION_SIZE 9 |
44 | 44 | ||
45 | #define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) | ||
46 | #define IS_PCH_eDP(i) ((i)->is_pch_edp) | ||
47 | |||
48 | struct intel_dp { | 45 | struct intel_dp { |
49 | struct intel_encoder base; | 46 | struct intel_encoder base; |
50 | uint32_t output_reg; | 47 | uint32_t output_reg; |
51 | uint32_t DP; | 48 | uint32_t DP; |
52 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 49 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
53 | bool has_audio; | 50 | bool has_audio; |
51 | int force_audio; | ||
54 | int dpms_mode; | 52 | int dpms_mode; |
55 | uint8_t link_bw; | 53 | uint8_t link_bw; |
56 | uint8_t lane_count; | 54 | uint8_t lane_count; |
@@ -60,8 +58,35 @@ struct intel_dp { | |||
60 | bool is_pch_edp; | 58 | bool is_pch_edp; |
61 | uint8_t train_set[4]; | 59 | uint8_t train_set[4]; |
62 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 60 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
61 | |||
62 | struct drm_property *force_audio_property; | ||
63 | }; | 63 | }; |
64 | 64 | ||
65 | /** | ||
66 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) | ||
67 | * @intel_dp: DP struct | ||
68 | * | ||
69 | * If a CPU or PCH DP output is attached to an eDP panel, this function | ||
70 | * will return true, and false otherwise. | ||
71 | */ | ||
72 | static bool is_edp(struct intel_dp *intel_dp) | ||
73 | { | ||
74 | return intel_dp->base.type == INTEL_OUTPUT_EDP; | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * is_pch_edp - is the port on the PCH and attached to an eDP panel? | ||
79 | * @intel_dp: DP struct | ||
80 | * | ||
81 | * Returns true if the given DP struct corresponds to a PCH DP port attached | ||
82 | * to an eDP panel, false otherwise. Helpful for determining whether we | ||
83 | * may need FDI resources for a given DP output or not. | ||
84 | */ | ||
85 | static bool is_pch_edp(struct intel_dp *intel_dp) | ||
86 | { | ||
87 | return intel_dp->is_pch_edp; | ||
88 | } | ||
89 | |||
65 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 90 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
66 | { | 91 | { |
67 | return container_of(encoder, struct intel_dp, base.base); | 92 | return container_of(encoder, struct intel_dp, base.base); |
@@ -73,6 +98,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) | |||
73 | struct intel_dp, base); | 98 | struct intel_dp, base); |
74 | } | 99 | } |
75 | 100 | ||
101 | /** | ||
102 | * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? | ||
103 | * @encoder: DRM encoder | ||
104 | * | ||
105 | * Return true if @encoder corresponds to a PCH attached eDP panel. Needed | ||
106 | * by intel_display.c. | ||
107 | */ | ||
108 | bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) | ||
109 | { | ||
110 | struct intel_dp *intel_dp; | ||
111 | |||
112 | if (!encoder) | ||
113 | return false; | ||
114 | |||
115 | intel_dp = enc_to_intel_dp(encoder); | ||
116 | |||
117 | return is_pch_edp(intel_dp); | ||
118 | } | ||
119 | |||
76 | static void intel_dp_start_link_train(struct intel_dp *intel_dp); | 120 | static void intel_dp_start_link_train(struct intel_dp *intel_dp); |
77 | static void intel_dp_complete_link_train(struct intel_dp *intel_dp); | 121 | static void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
78 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 122 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
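Together the two predicates split ports into three cases, which is what the display code keys on below; a hypothetical helper for the remaining combination:

    /* Sketch: the cases carved out by is_edp()/is_pch_edp().
     *   CPU eDP:  is_edp() && !is_pch_edp()  -> DP_A, eDP PLL, no FDI
     *   PCH eDP:  is_edp() && is_pch_edp()   -> PCH port, needs FDI
     *   plain DP: !is_edp()                                           */
    static bool is_cpu_edp(struct intel_dp *intel_dp)
    {
            return is_edp(intel_dp) && !is_pch_edp(intel_dp);
    }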
@@ -138,7 +182,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi | |||
138 | { | 182 | { |
139 | struct drm_i915_private *dev_priv = dev->dev_private; | 183 | struct drm_i915_private *dev_priv = dev->dev_private; |
140 | 184 | ||
141 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 185 | if (is_edp(intel_dp)) |
142 | return (pixel_clock * dev_priv->edp.bpp + 7) / 8; | 186 | return (pixel_clock * dev_priv->edp.bpp + 7) / 8; |
143 | else | 187 | else |
144 | return pixel_clock * 3; | 188 | return pixel_clock * 3; |
@@ -160,8 +204,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
160 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | 204 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
161 | int max_lanes = intel_dp_max_lane_count(intel_dp); | 205 | int max_lanes = intel_dp_max_lane_count(intel_dp); |
162 | 206 | ||
163 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 207 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { |
164 | dev_priv->panel_fixed_mode) { | ||
165 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | 208 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) |
166 | return MODE_PANEL; | 209 | return MODE_PANEL; |
167 | 210 | ||
@@ -171,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
171 | 214 | ||
172 | /* only refuse the mode on non-eDP since we have seen some weird eDP panels | 215 |
173 | which are outside spec tolerances but somehow work by magic */ | 216 | which are outside spec tolerances but somehow work by magic */ |
174 | if (!IS_eDP(intel_dp) && | 217 | if (!is_edp(intel_dp) && |
175 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) | 218 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) |
176 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | 219 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) |
177 | return MODE_CLOCK_HIGH; | 220 | return MODE_CLOCK_HIGH; |
@@ -258,7 +301,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
258 | * Note that PCH attached eDP panels should use a 125MHz input | 301 | * Note that PCH attached eDP panels should use a 125MHz input |
259 | * clock divider. | 302 | * clock divider. |
260 | */ | 303 | */ |
261 | if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) { | 304 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { |
262 | if (IS_GEN6(dev)) | 305 | if (IS_GEN6(dev)) |
263 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ | 306 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ |
264 | else | 307 | else |
@@ -530,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
530 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; | 573 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
531 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 574 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
532 | 575 | ||
533 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 576 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { |
534 | dev_priv->panel_fixed_mode) { | ||
535 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); | 577 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); |
536 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | 578 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, |
537 | mode, adjusted_mode); | 579 | mode, adjusted_mode); |
@@ -542,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
542 | mode->clock = dev_priv->panel_fixed_mode->clock; | 584 | mode->clock = dev_priv->panel_fixed_mode->clock; |
543 | } | 585 | } |
544 | 586 | ||
587 | /* Just use VBT values for eDP */ | ||
588 | if (is_edp(intel_dp)) { | ||
589 | intel_dp->lane_count = dev_priv->edp.lanes; | ||
590 | intel_dp->link_bw = dev_priv->edp.rate; | ||
591 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
592 | DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", | ||
593 | intel_dp->link_bw, intel_dp->lane_count, | ||
594 | adjusted_mode->clock); | ||
595 | return true; | ||
596 | } | ||
597 | |||
545 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 598 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
546 | for (clock = 0; clock <= max_clock; clock++) { | 599 | for (clock = 0; clock <= max_clock; clock++) { |
547 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 600 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
@@ -560,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
560 | } | 613 | } |
561 | } | 614 | } |
562 | 615 | ||
563 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | ||
564 | /* okay we failed just pick the highest */ | ||
565 | intel_dp->lane_count = max_lane_count; | ||
566 | intel_dp->link_bw = bws[max_clock]; | ||
567 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
568 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
569 | "count %d clock %d\n", | ||
570 | intel_dp->link_bw, intel_dp->lane_count, | ||
571 | adjusted_mode->clock); | ||
572 | |||
573 | return true; | ||
574 | } | ||
575 | |||
576 | return false; | 616 | return false; |
577 | } | 617 | } |
578 | 618 | ||
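The search widens lane count (1, 2, 4) and then link rate (1.62GHz, 2.7GHz) until intel_dp_max_data_rate() covers the requirement. With the 8b/10b coding overhead, for example, a 154MHz pixel clock at 3 bytes per pixel needs 154000 x 3 = 462000; two 2.7GHz lanes offer 270000 x 2 x 8/10 = 432000, too little, while four lanes give 864000 and fit.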
@@ -609,25 +649,6 @@ intel_dp_compute_m_n(int bpp, | |||
609 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | 649 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
610 | } | 650 | } |
611 | 651 | ||
612 | bool intel_pch_has_edp(struct drm_crtc *crtc) | ||
613 | { | ||
614 | struct drm_device *dev = crtc->dev; | ||
615 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
616 | struct drm_encoder *encoder; | ||
617 | |||
618 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
619 | struct intel_dp *intel_dp; | ||
620 | |||
621 | if (encoder->crtc != crtc) | ||
622 | continue; | ||
623 | |||
624 | intel_dp = enc_to_intel_dp(encoder); | ||
625 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) | ||
626 | return intel_dp->is_pch_edp; | ||
627 | } | ||
628 | return false; | ||
629 | } | ||
630 | |||
631 | void | 652 | void |
632 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 653 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
633 | struct drm_display_mode *adjusted_mode) | 654 | struct drm_display_mode *adjusted_mode) |
@@ -652,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
652 | intel_dp = enc_to_intel_dp(encoder); | 673 | intel_dp = enc_to_intel_dp(encoder); |
653 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { | 674 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { |
654 | lane_count = intel_dp->lane_count; | 675 | lane_count = intel_dp->lane_count; |
655 | if (IS_PCH_eDP(intel_dp)) | 676 | break; |
656 | bpp = dev_priv->edp.bpp; | 677 | } else if (is_edp(intel_dp)) { |
678 | lane_count = dev_priv->edp.lanes; | ||
679 | bpp = dev_priv->edp.bpp; | ||
657 | break; | 680 | break; |
658 | } | 681 | } |
659 | } | 682 | } |
@@ -720,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
720 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 743 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
721 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 744 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
722 | 745 | ||
723 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 746 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
724 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 747 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
725 | else | 748 | else |
726 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | 749 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
@@ -755,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
755 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | 778 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) |
756 | intel_dp->DP |= DP_PIPEB_SELECT; | 779 | intel_dp->DP |= DP_PIPEB_SELECT; |
757 | 780 | ||
758 | if (IS_eDP(intel_dp)) { | 781 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { |
759 | /* don't miss out required setting for eDP */ | 782 | /* don't miss out required setting for eDP */ |
760 | intel_dp->DP |= DP_PLL_ENABLE; | 783 | intel_dp->DP |= DP_PLL_ENABLE; |
761 | if (adjusted_mode->clock < 200000) | 784 | if (adjusted_mode->clock < 200000) |
@@ -766,10 +789,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
766 | } | 789 | } |
767 | 790 | ||
768 | /* Returns true if the panel was already on when called */ | 791 | /* Returns true if the panel was already on when called */ |
769 | static bool ironlake_edp_panel_on (struct drm_device *dev) | 792 | static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) |
770 | { | 793 | { |
794 | struct drm_device *dev = intel_dp->base.base.dev; | ||
771 | struct drm_i915_private *dev_priv = dev->dev_private; | 795 | struct drm_i915_private *dev_priv = dev->dev_private; |
772 | u32 pp; | 796 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; |
773 | 797 | ||
774 | if (I915_READ(PCH_PP_STATUS) & PP_ON) | 798 | if (I915_READ(PCH_PP_STATUS) & PP_ON) |
775 | return true; | 799 | return true; |
@@ -781,19 +805,20 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) | |||
781 | I915_WRITE(PCH_PP_CONTROL, pp); | 805 | I915_WRITE(PCH_PP_CONTROL, pp); |
782 | POSTING_READ(PCH_PP_CONTROL); | 806 | POSTING_READ(PCH_PP_CONTROL); |
783 | 807 | ||
784 | pp |= POWER_TARGET_ON; | 808 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; |
785 | I915_WRITE(PCH_PP_CONTROL, pp); | 809 | I915_WRITE(PCH_PP_CONTROL, pp); |
810 | POSTING_READ(PCH_PP_CONTROL); | ||
786 | 811 | ||
787 | /* Ouch. We need to wait here for some panels, like Dell e6510 | 812 | /* Ouch. We need to wait here for some panels, like Dell e6510 |
788 | * https://bugs.freedesktop.org/show_bug.cgi?id=29278i | 813 | * https://bugs.freedesktop.org/show_bug.cgi?id=29278i |
789 | */ | 814 | */ |
790 | msleep(300); | 815 | msleep(300); |
791 | 816 | ||
792 | if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) | 817 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, |
818 | 5000)) | ||
793 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | 819 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
794 | I915_READ(PCH_PP_STATUS)); | 820 | I915_READ(PCH_PP_STATUS)); |
795 | 821 | ||
796 | pp &= ~(PANEL_UNLOCK_REGS); | ||
797 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 822 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
798 | I915_WRITE(PCH_PP_CONTROL, pp); | 823 | I915_WRITE(PCH_PP_CONTROL, pp); |
799 | POSTING_READ(PCH_PP_CONTROL); | 824 | POSTING_READ(PCH_PP_CONTROL); |
@@ -804,7 +829,8 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) | |||
804 | static void ironlake_edp_panel_off (struct drm_device *dev) | 829 | static void ironlake_edp_panel_off (struct drm_device *dev) |
805 | { | 830 | { |
806 | struct drm_i915_private *dev_priv = dev->dev_private; | 831 | struct drm_i915_private *dev_priv = dev->dev_private; |
807 | u32 pp; | 832 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | |
833 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; | ||
808 | 834 | ||
809 | pp = I915_READ(PCH_PP_CONTROL); | 835 | pp = I915_READ(PCH_PP_CONTROL); |
810 | 836 | ||
@@ -815,12 +841,12 @@ static void ironlake_edp_panel_off (struct drm_device *dev) | |||
815 | 841 | ||
816 | pp &= ~POWER_TARGET_ON; | 842 | pp &= ~POWER_TARGET_ON; |
817 | I915_WRITE(PCH_PP_CONTROL, pp); | 843 | I915_WRITE(PCH_PP_CONTROL, pp); |
844 | POSTING_READ(PCH_PP_CONTROL); | ||
818 | 845 | ||
819 | if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000)) | 846 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) |
820 | DRM_ERROR("panel off wait timed out: 0x%08x\n", | 847 | DRM_ERROR("panel off wait timed out: 0x%08x\n", |
821 | I915_READ(PCH_PP_STATUS)); | 848 | I915_READ(PCH_PP_STATUS)); |
822 | 849 | ||
823 | /* Make sure VDD is enabled so DP AUX will work */ | ||
824 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 850 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
825 | I915_WRITE(PCH_PP_CONTROL, pp); | 851 | I915_WRITE(PCH_PP_CONTROL, pp); |
826 | POSTING_READ(PCH_PP_CONTROL); | 852 | POSTING_READ(PCH_PP_CONTROL); |
@@ -831,36 +857,19 @@ static void ironlake_edp_panel_off (struct drm_device *dev) | |||
831 | msleep(300); | 857 | msleep(300); |
832 | } | 858 | } |
833 | 859 | ||
834 | static void ironlake_edp_panel_vdd_on(struct drm_device *dev) | ||
835 | { | ||
836 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
837 | u32 pp; | ||
838 | |||
839 | pp = I915_READ(PCH_PP_CONTROL); | ||
840 | pp |= EDP_FORCE_VDD; | ||
841 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
842 | POSTING_READ(PCH_PP_CONTROL); | ||
843 | msleep(300); | ||
844 | } | ||
845 | |||
846 | static void ironlake_edp_panel_vdd_off(struct drm_device *dev) | ||
847 | { | ||
848 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
849 | u32 pp; | ||
850 | |||
851 | pp = I915_READ(PCH_PP_CONTROL); | ||
852 | pp &= ~EDP_FORCE_VDD; | ||
853 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
854 | POSTING_READ(PCH_PP_CONTROL); | ||
855 | msleep(300); | ||
856 | } | ||
857 | |||
858 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 860 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
859 | { | 861 | { |
860 | struct drm_i915_private *dev_priv = dev->dev_private; | 862 | struct drm_i915_private *dev_priv = dev->dev_private; |
861 | u32 pp; | 863 | u32 pp; |
862 | 864 | ||
863 | DRM_DEBUG_KMS("\n"); | 865 | DRM_DEBUG_KMS("\n"); |
866 | /* | ||
867 | * If we enable the backlight right away following a panel power | ||
868 | * on, we may see slight flicker as the panel syncs with the eDP | ||
869 | * link. So delay a bit to make sure the image is solid before | ||
870 | * allowing it to appear. | ||
871 | */ | ||
872 | msleep(300); | ||
864 | pp = I915_READ(PCH_PP_CONTROL); | 873 | pp = I915_READ(PCH_PP_CONTROL); |
865 | pp |= EDP_BLC_ENABLE; | 874 | pp |= EDP_BLC_ENABLE; |
866 | I915_WRITE(PCH_PP_CONTROL, pp); | 875 | I915_WRITE(PCH_PP_CONTROL, pp); |
@@ -885,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder) | |||
885 | 894 | ||
886 | DRM_DEBUG_KMS("\n"); | 895 | DRM_DEBUG_KMS("\n"); |
887 | dpa_ctl = I915_READ(DP_A); | 896 | dpa_ctl = I915_READ(DP_A); |
888 | dpa_ctl &= ~DP_PLL_ENABLE; | 897 | dpa_ctl |= DP_PLL_ENABLE; |
889 | I915_WRITE(DP_A, dpa_ctl); | 898 | I915_WRITE(DP_A, dpa_ctl); |
899 | POSTING_READ(DP_A); | ||
900 | udelay(200); | ||
890 | } | 901 | } |
891 | 902 | ||
892 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) | 903 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) |
@@ -896,7 +907,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) | |||
896 | u32 dpa_ctl; | 907 | u32 dpa_ctl; |
897 | 908 | ||
898 | dpa_ctl = I915_READ(DP_A); | 909 | dpa_ctl = I915_READ(DP_A); |
899 | dpa_ctl |= DP_PLL_ENABLE; | 910 | dpa_ctl &= ~DP_PLL_ENABLE; |
900 | I915_WRITE(DP_A, dpa_ctl); | 911 | I915_WRITE(DP_A, dpa_ctl); |
901 | POSTING_READ(DP_A); | 912 | POSTING_READ(DP_A); |
902 | udelay(200); | 913 | udelay(200); |
@@ -906,17 +917,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder) | |||
906 | { | 917 | { |
907 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 918 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
908 | struct drm_device *dev = encoder->dev; | 919 | struct drm_device *dev = encoder->dev; |
909 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
910 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | ||
911 | 920 | ||
912 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 921 | if (is_edp(intel_dp)) { |
913 | ironlake_edp_panel_off(dev); | ||
914 | ironlake_edp_backlight_off(dev); | 922 | ironlake_edp_backlight_off(dev); |
915 | ironlake_edp_panel_vdd_on(dev); | 923 | ironlake_edp_panel_on(intel_dp); |
916 | ironlake_edp_pll_on(encoder); | 924 | if (!is_pch_edp(intel_dp)) |
925 | ironlake_edp_pll_on(encoder); | ||
926 | else | ||
927 | ironlake_edp_pll_off(encoder); | ||
917 | } | 928 | } |
918 | if (dp_reg & DP_PORT_EN) | 929 | intel_dp_link_down(intel_dp); |
919 | intel_dp_link_down(intel_dp); | ||
920 | } | 930 | } |
921 | 931 | ||
922 | static void intel_dp_commit(struct drm_encoder *encoder) | 932 | static void intel_dp_commit(struct drm_encoder *encoder) |
@@ -926,14 +936,13 @@ static void intel_dp_commit(struct drm_encoder *encoder) | |||
926 | 936 | ||
927 | intel_dp_start_link_train(intel_dp); | 937 | intel_dp_start_link_train(intel_dp); |
928 | 938 | ||
929 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 939 | if (is_edp(intel_dp)) |
930 | ironlake_edp_panel_on(dev); | 940 | ironlake_edp_panel_on(intel_dp); |
931 | 941 | ||
932 | intel_dp_complete_link_train(intel_dp); | 942 | intel_dp_complete_link_train(intel_dp); |
933 | 943 | ||
934 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 944 | if (is_edp(intel_dp)) |
935 | ironlake_edp_backlight_on(dev); | 945 | ironlake_edp_backlight_on(dev); |
936 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | ||
937 | } | 946 | } |
938 | 947 | ||
939 | static void | 948 | static void |
@@ -945,23 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
945 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 954 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
946 | 955 | ||
947 | if (mode != DRM_MODE_DPMS_ON) { | 956 | if (mode != DRM_MODE_DPMS_ON) { |
948 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 957 | if (is_edp(intel_dp)) |
949 | ironlake_edp_backlight_off(dev); | 958 | ironlake_edp_backlight_off(dev); |
959 | intel_dp_link_down(intel_dp); | ||
960 | if (is_edp(intel_dp)) | ||
950 | ironlake_edp_panel_off(dev); | 961 | ironlake_edp_panel_off(dev); |
951 | } | 962 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) |
952 | if (dp_reg & DP_PORT_EN) | ||
953 | intel_dp_link_down(intel_dp); | ||
954 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
955 | ironlake_edp_pll_off(encoder); | 963 | ironlake_edp_pll_off(encoder); |
956 | } else { | 964 | } else { |
965 | if (is_edp(intel_dp)) | ||
966 | ironlake_edp_panel_on(intel_dp); | ||
957 | if (!(dp_reg & DP_PORT_EN)) { | 967 | if (!(dp_reg & DP_PORT_EN)) { |
958 | intel_dp_start_link_train(intel_dp); | 968 | intel_dp_start_link_train(intel_dp); |
959 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
960 | ironlake_edp_panel_on(dev); | ||
961 | intel_dp_complete_link_train(intel_dp); | 969 | intel_dp_complete_link_train(intel_dp); |
962 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
963 | ironlake_edp_backlight_on(dev); | ||
964 | } | 970 | } |
971 | if (is_edp(intel_dp)) | ||
972 | ironlake_edp_backlight_on(dev); | ||
965 | } | 973 | } |
966 | intel_dp->dpms_mode = mode; | 974 | intel_dp->dpms_mode = mode; |
967 | } | 975 | } |
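The reworked dpms path above pins down a strict eDP power sequence: on the way off, backlight first, then the link, then panel power; on the way on, panel power before link training and backlight last. A minimal standalone sketch of that ordering, with every helper stubbed (the stub names are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

static void backlight(bool on) { printf("backlight %s\n", on ? "on" : "off"); }
static void panel(bool on)     { printf("panel %s\n", on ? "on" : "off"); }
static void train_link(void)   { printf("link training\n"); }
static void link_down(void)    { printf("link down\n"); }

/* Never light the backlight before the panel has power and a trained
 * link, and never cut panel power while the link is still up. */
static void edp_dpms(bool to_on)
{
    if (to_on) {
        panel(true);
        train_link();
        backlight(true);
    } else {
        backlight(false);
        link_down();
        panel(false);
    }
}

int main(void)
{
    edp_dpms(false);
    edp_dpms(true);
    return 0;
}

Getting the order wrong typically shows up as a backlit blank panel or a visible flash, which is why the sequencing is centralised in the dpms/prepare/commit hooks rather than left to callers.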
@@ -1079,11 +1087,21 @@ intel_get_adjust_train(struct intel_dp *intel_dp) | |||
1079 | } | 1087 | } |
1080 | 1088 | ||
1081 | static uint32_t | 1089 | static uint32_t |
1082 | intel_dp_signal_levels(uint8_t train_set, int lane_count) | 1090 | intel_dp_signal_levels(struct intel_dp *intel_dp) |
1083 | { | 1091 | { |
1084 | uint32_t signal_levels = 0; | 1092 | struct drm_device *dev = intel_dp->base.base.dev; |
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1094 | uint32_t signal_levels = 0; | ||
1095 | u8 train_set = intel_dp->train_set[0]; | ||
1096 | u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1097 | u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; | ||
1098 | |||
1099 | if (is_edp(intel_dp)) { | ||
1100 | vswing = dev_priv->edp.vswing; | ||
1101 | preemphasis = dev_priv->edp.preemphasis; | ||
1102 | } | ||
1085 | 1103 | ||
1086 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { | 1104 | switch (vswing) { |
1087 | case DP_TRAIN_VOLTAGE_SWING_400: | 1105 | case DP_TRAIN_VOLTAGE_SWING_400: |
1088 | default: | 1106 | default: |
1089 | signal_levels |= DP_VOLTAGE_0_4; | 1107 | signal_levels |= DP_VOLTAGE_0_4; |
@@ -1098,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) | |||
1098 | signal_levels |= DP_VOLTAGE_1_2; | 1116 | signal_levels |= DP_VOLTAGE_1_2; |
1099 | break; | 1117 | break; |
1100 | } | 1118 | } |
1101 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { | 1119 | switch (preemphasis) { |
1102 | case DP_TRAIN_PRE_EMPHASIS_0: | 1120 | case DP_TRAIN_PRE_EMPHASIS_0: |
1103 | default: | 1121 | default: |
1104 | signal_levels |= DP_PRE_EMPHASIS_0; | 1122 | signal_levels |= DP_PRE_EMPHASIS_0; |
@@ -1185,6 +1203,18 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) | |||
1185 | } | 1203 | } |
1186 | 1204 | ||
1187 | static bool | 1205 | static bool |
1206 | intel_dp_aux_handshake_required(struct intel_dp *intel_dp) | ||
1207 | { | ||
1208 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1209 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1210 | |||
1211 | if (is_edp(intel_dp) && dev_priv->no_aux_handshake) | ||
1212 | return false; | ||
1213 | |||
1214 | return true; | ||
1215 | } | ||
1216 | |||
1217 | static bool | ||
1188 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1218 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1189 | uint32_t dp_reg_value, | 1219 | uint32_t dp_reg_value, |
1190 | uint8_t dp_train_pat) | 1220 | uint8_t dp_train_pat) |
@@ -1196,6 +1226,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1196 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1226 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1197 | POSTING_READ(intel_dp->output_reg); | 1227 | POSTING_READ(intel_dp->output_reg); |
1198 | 1228 | ||
1229 | if (!intel_dp_aux_handshake_required(intel_dp)) | ||
1230 | return true; | ||
1231 | |||
1199 | intel_dp_aux_native_write_1(intel_dp, | 1232 | intel_dp_aux_native_write_1(intel_dp, |
1200 | DP_TRAINING_PATTERN_SET, | 1233 | DP_TRAINING_PATTERN_SET, |
1201 | dp_train_pat); | 1234 | dp_train_pat); |
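The early return above is the whole point of the new predicate: when an eDP panel advertises that it trains without an AUX handshake, the port register write still happens, but every DPCD training access is skipped. A sketch of the guard pattern, assuming a capability flag cached at init (the helper names are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dp_port {
    bool no_aux_handshake;  /* cached from the panel's DPCD at init */
};

static void write_port_reg(uint32_t v)           { printf("DP reg <- 0x%08x\n", v); }
static void dpcd_write(unsigned addr, unsigned v) { printf("DPCD[0x%03x] <- 0x%02x\n", addr, v); }

static bool set_link_train(struct dp_port *p, uint32_t reg, uint8_t pattern)
{
    write_port_reg(reg);         /* the hardware side always happens */
    if (p->no_aux_handshake)     /* panel trains without the DPCD dance */
        return true;
    dpcd_write(0x102, pattern);  /* DP_TRAINING_PATTERN_SET */
    return true;
}

int main(void)
{
    struct dp_port edp = { .no_aux_handshake = true };
    set_link_train(&edp, 0x80000000, 0x01);
    return 0;
}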
@@ -1228,13 +1261,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1228 | POSTING_READ(intel_dp->output_reg); | 1261 | POSTING_READ(intel_dp->output_reg); |
1229 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1262 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1230 | 1263 | ||
1231 | /* Write the link configuration data */ | 1264 | if (intel_dp_aux_handshake_required(intel_dp)) |
1232 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1265 | /* Write the link configuration data */ |
1233 | intel_dp->link_configuration, | 1266 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
1234 | DP_LINK_CONFIGURATION_SIZE); | 1267 | intel_dp->link_configuration, |
1268 | DP_LINK_CONFIGURATION_SIZE); | ||
1235 | 1269 | ||
1236 | DP |= DP_PORT_EN; | 1270 | DP |= DP_PORT_EN; |
1237 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1271 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1238 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1272 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1239 | else | 1273 | else |
1240 | DP &= ~DP_LINK_TRAIN_MASK; | 1274 | DP &= ~DP_LINK_TRAIN_MASK; |
@@ -1245,15 +1279,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1245 | for (;;) { | 1279 | for (;;) { |
1246 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1280 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1247 | uint32_t signal_levels; | 1281 | uint32_t signal_levels; |
1248 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { | 1282 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1249 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1283 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1250 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1284 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1251 | } else { | 1285 | } else { |
1252 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); | 1286 | signal_levels = intel_dp_signal_levels(intel_dp); |
1253 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1287 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1254 | } | 1288 | } |
1255 | 1289 | ||
1256 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1290 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1257 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | 1291 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; |
1258 | else | 1292 | else |
1259 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1293 | reg = DP | DP_LINK_TRAIN_PAT_1; |
@@ -1263,33 +1297,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1263 | break; | 1297 | break; |
1264 | /* Set training pattern 1 */ | 1298 | /* Set training pattern 1 */ |
1265 | 1299 | ||
1266 | udelay(100); | 1300 | udelay(500); |
1267 | if (!intel_dp_get_link_status(intel_dp)) | 1301 | if (!intel_dp_aux_handshake_required(intel_dp)) { |
1268 | break; | ||
1269 | |||
1270 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | ||
1271 | clock_recovery = true; | ||
1272 | break; | 1302 | break; |
1273 | } | 1303 | } else { |
1304 | if (!intel_dp_get_link_status(intel_dp)) | ||
1305 | break; | ||
1274 | 1306 | ||
1275 | /* Check to see if we've tried the max voltage */ | 1307 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { |
1276 | for (i = 0; i < intel_dp->lane_count; i++) | 1308 | clock_recovery = true; |
1277 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||
1278 | break; | 1309 | break; |
1279 | if (i == intel_dp->lane_count) | 1310 | } |
1280 | break; | ||
1281 | 1311 | ||
1282 | /* Check to see if we've tried the same voltage 5 times */ | 1312 | /* Check to see if we've tried the max voltage */ |
1283 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1313 | for (i = 0; i < intel_dp->lane_count; i++) |
1284 | ++tries; | 1314 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1285 | if (tries == 5) | 1315 | break; |
1316 | if (i == intel_dp->lane_count) | ||
1286 | break; | 1317 | break; |
1287 | } else | ||
1288 | tries = 0; | ||
1289 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1290 | 1318 | ||
1291 | /* Compute new intel_dp->train_set as requested by target */ | 1319 | /* Check to see if we've tried the same voltage 5 times */ |
1292 | intel_get_adjust_train(intel_dp); | 1320 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
1321 | ++tries; | ||
1322 | if (tries == 5) | ||
1323 | break; | ||
1324 | } else | ||
1325 | tries = 0; | ||
1326 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1327 | |||
1328 | /* Compute new intel_dp->train_set as requested by target */ | ||
1329 | intel_get_adjust_train(intel_dp); | ||
1330 | } | ||
1293 | } | 1331 | } |
1294 | 1332 | ||
1295 | intel_dp->DP = DP; | 1333 | intel_dp->DP = DP; |
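For sinks that do take the AUX path, the inner block above is the standard DP clock-recovery algorithm: poll lane status, and on failure raise drive levels as the sink requests, giving up once every lane reports maximum swing or after five rounds at the same voltage. A standalone sketch of that escalation logic, with a toy sink standing in for the DPCD reads:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VSWING_MASK        0x3  /* mirrors DP_TRAIN_VOLTAGE_SWING_MASK */
#define MAX_SWING_REACHED  0x4  /* mirrors DP_TRAIN_MAX_SWING_REACHED */

/* Toy sink: recovers once the requested swing reaches level 2. */
static bool clock_recovery_ok(const uint8_t *ts, int lanes)
{
    (void)lanes;
    return (ts[0] & VSWING_MASK) >= 2;
}

/* Toy adjust: the sink asks for one more swing step each round. */
static void adjust_train(uint8_t *ts, int lanes)
{
    for (int i = 0; i < lanes; i++) {
        uint8_t v = (ts[i] & VSWING_MASK) + 1;
        ts[i] = v | (v == VSWING_MASK ? MAX_SWING_REACHED : 0);
    }
}

static bool train_clock_recovery(uint8_t *ts, int lanes)
{
    uint8_t voltage = 0xff;
    int tries = 0, i;

    for (;;) {
        if (clock_recovery_ok(ts, lanes))
            return true;

        /* every lane already at maximum swing: give up */
        for (i = 0; i < lanes; i++)
            if (!(ts[i] & MAX_SWING_REACHED))
                break;
        if (i == lanes)
            return false;

        /* same voltage requested five times in a row: give up */
        if ((ts[0] & VSWING_MASK) == voltage) {
            if (++tries == 5)
                return false;
        } else
            tries = 0;
        voltage = ts[0] & VSWING_MASK;

        adjust_train(ts, lanes);  /* apply sink-requested levels */
    }
}

int main(void)
{
    uint8_t train_set[4] = { 0 };
    printf("recovered: %d\n", train_clock_recovery(train_set, 4));
    return 0;
}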
@@ -1312,15 +1350,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1312 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1350 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1313 | uint32_t signal_levels; | 1351 | uint32_t signal_levels; |
1314 | 1352 | ||
1315 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { | 1353 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1316 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1354 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1317 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1355 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1318 | } else { | 1356 | } else { |
1319 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); | 1357 | signal_levels = intel_dp_signal_levels(intel_dp); |
1320 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1358 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1321 | } | 1359 | } |
1322 | 1360 | ||
1323 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1361 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1324 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | 1362 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; |
1325 | else | 1363 | else |
1326 | reg = DP | DP_LINK_TRAIN_PAT_2; | 1364 | reg = DP | DP_LINK_TRAIN_PAT_2; |
@@ -1330,25 +1368,29 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1330 | DP_TRAINING_PATTERN_2)) | 1368 | DP_TRAINING_PATTERN_2)) |
1331 | break; | 1369 | break; |
1332 | 1370 | ||
1333 | udelay(400); | 1371 | udelay(500); |
1334 | if (!intel_dp_get_link_status(intel_dp)) | ||
1335 | break; | ||
1336 | 1372 | ||
1337 | if (intel_channel_eq_ok(intel_dp)) { | 1373 | if (!intel_dp_aux_handshake_required(intel_dp)) { |
1338 | channel_eq = true; | ||
1339 | break; | 1374 | break; |
1340 | } | 1375 | } else { |
1376 | if (!intel_dp_get_link_status(intel_dp)) | ||
1377 | break; | ||
1341 | 1378 | ||
1342 | /* Try 5 times */ | 1379 | if (intel_channel_eq_ok(intel_dp)) { |
1343 | if (tries > 5) | 1380 | channel_eq = true; |
1344 | break; | 1381 | break; |
1382 | } | ||
1345 | 1383 | ||
1346 | /* Compute new intel_dp->train_set as requested by target */ | 1384 | /* Try 5 times */ |
1347 | intel_get_adjust_train(intel_dp); | 1385 | if (tries > 5) |
1348 | ++tries; | 1386 | break; |
1349 | } | ||
1350 | 1387 | ||
1351 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1388 | /* Compute new intel_dp->train_set as requested by target */ |
1389 | intel_get_adjust_train(intel_dp); | ||
1390 | ++tries; | ||
1391 | } | ||
1392 | } | ||
1393 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | ||
1352 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1394 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1353 | else | 1395 | else |
1354 | reg = DP | DP_LINK_TRAIN_OFF; | 1396 | reg = DP | DP_LINK_TRAIN_OFF; |
@@ -1368,14 +1410,14 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1368 | 1410 | ||
1369 | DRM_DEBUG_KMS("\n"); | 1411 | DRM_DEBUG_KMS("\n"); |
1370 | 1412 | ||
1371 | if (IS_eDP(intel_dp)) { | 1413 | if (is_edp(intel_dp)) { |
1372 | DP &= ~DP_PLL_ENABLE; | 1414 | DP &= ~DP_PLL_ENABLE; |
1373 | I915_WRITE(intel_dp->output_reg, DP); | 1415 | I915_WRITE(intel_dp->output_reg, DP); |
1374 | POSTING_READ(intel_dp->output_reg); | 1416 | POSTING_READ(intel_dp->output_reg); |
1375 | udelay(100); | 1417 | udelay(100); |
1376 | } | 1418 | } |
1377 | 1419 | ||
1378 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { | 1420 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { |
1379 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1421 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1380 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); | 1422 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1381 | } else { | 1423 | } else { |
@@ -1386,7 +1428,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1386 | 1428 | ||
1387 | msleep(17); | 1429 | msleep(17); |
1388 | 1430 | ||
1389 | if (IS_eDP(intel_dp)) | 1431 | if (is_edp(intel_dp)) |
1390 | DP |= DP_LINK_TRAIN_OFF; | 1432 | DP |= DP_LINK_TRAIN_OFF; |
1391 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1433 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1392 | POSTING_READ(intel_dp->output_reg); | 1434 | POSTING_READ(intel_dp->output_reg); |
@@ -1419,48 +1461,34 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
1419 | } | 1461 | } |
1420 | 1462 | ||
1421 | static enum drm_connector_status | 1463 | static enum drm_connector_status |
1422 | ironlake_dp_detect(struct drm_connector *connector) | 1464 | ironlake_dp_detect(struct intel_dp *intel_dp) |
1423 | { | 1465 | { |
1424 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1425 | enum drm_connector_status status; | 1466 | enum drm_connector_status status; |
1426 | 1467 | ||
1427 | /* Panel needs power for AUX to work */ | 1468 | /* Can't disconnect eDP */ |
1428 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 1469 | if (is_edp(intel_dp)) |
1429 | ironlake_edp_panel_vdd_on(connector->dev); | 1470 | return connector_status_connected; |
1471 | |||
1430 | status = connector_status_disconnected; | 1472 | status = connector_status_disconnected; |
1431 | if (intel_dp_aux_native_read(intel_dp, | 1473 | if (intel_dp_aux_native_read(intel_dp, |
1432 | 0x000, intel_dp->dpcd, | 1474 | 0x000, intel_dp->dpcd, |
1433 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) | 1475 | sizeof (intel_dp->dpcd)) |
1434 | { | 1476 | == sizeof(intel_dp->dpcd)) { |
1435 | if (intel_dp->dpcd[0] != 0) | 1477 | if (intel_dp->dpcd[0] != 0) |
1436 | status = connector_status_connected; | 1478 | status = connector_status_connected; |
1437 | } | 1479 | } |
1438 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], | 1480 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], |
1439 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); | 1481 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); |
1440 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
1441 | ironlake_edp_panel_vdd_off(connector->dev); | ||
1442 | return status; | 1482 | return status; |
1443 | } | 1483 | } |
1444 | 1484 | ||
1445 | /** | ||
1446 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | ||
1447 | * | ||
1448 | * \return true if DP port is connected. | ||
1449 | * \return false if DP port is disconnected. | ||
1450 | */ | ||
1451 | static enum drm_connector_status | 1485 | static enum drm_connector_status |
1452 | intel_dp_detect(struct drm_connector *connector, bool force) | 1486 | g4x_dp_detect(struct intel_dp *intel_dp) |
1453 | { | 1487 | { |
1454 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1455 | struct drm_device *dev = intel_dp->base.base.dev; | 1488 | struct drm_device *dev = intel_dp->base.base.dev; |
1456 | struct drm_i915_private *dev_priv = dev->dev_private; | 1489 | struct drm_i915_private *dev_priv = dev->dev_private; |
1457 | uint32_t temp, bit; | ||
1458 | enum drm_connector_status status; | 1490 | enum drm_connector_status status; |
1459 | 1491 | uint32_t temp, bit; | |
1460 | intel_dp->has_audio = false; | ||
1461 | |||
1462 | if (HAS_PCH_SPLIT(dev)) | ||
1463 | return ironlake_dp_detect(connector); | ||
1464 | 1492 | ||
1465 | switch (intel_dp->output_reg) { | 1493 | switch (intel_dp->output_reg) { |
1466 | case DP_B: | 1494 | case DP_B: |
@@ -1482,14 +1510,51 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
1482 | return connector_status_disconnected; | 1510 | return connector_status_disconnected; |
1483 | 1511 | ||
1484 | status = connector_status_disconnected; | 1512 | status = connector_status_disconnected; |
1485 | if (intel_dp_aux_native_read(intel_dp, | 1513 | if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, |
1486 | 0x000, intel_dp->dpcd, | ||
1487 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) | 1514 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) |
1488 | { | 1515 | { |
1489 | if (intel_dp->dpcd[0] != 0) | 1516 | if (intel_dp->dpcd[0] != 0) |
1490 | status = connector_status_connected; | 1517 | status = connector_status_connected; |
1491 | } | 1518 | } |
1492 | return status; | 1519 | |
1520 | return status; | ||
1521 | } | ||
1522 | |||
1523 | /** | ||
1524 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | ||
1525 | * | ||
1526 | * \return connector_status_connected if the DP port is connected. | ||
1527 | * \return connector_status_disconnected if the DP port is disconnected. | ||
1528 | */ | ||
1529 | static enum drm_connector_status | ||
1530 | intel_dp_detect(struct drm_connector *connector, bool force) | ||
1531 | { | ||
1532 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1533 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1534 | enum drm_connector_status status; | ||
1535 | struct edid *edid = NULL; | ||
1536 | |||
1537 | intel_dp->has_audio = false; | ||
1538 | |||
1539 | if (HAS_PCH_SPLIT(dev)) | ||
1540 | status = ironlake_dp_detect(intel_dp); | ||
1541 | else | ||
1542 | status = g4x_dp_detect(intel_dp); | ||
1543 | if (status != connector_status_connected) | ||
1544 | return status; | ||
1545 | |||
1546 | if (intel_dp->force_audio) { | ||
1547 | intel_dp->has_audio = intel_dp->force_audio > 0; | ||
1548 | } else { | ||
1549 | edid = drm_get_edid(connector, &intel_dp->adapter); | ||
1550 | if (edid) { | ||
1551 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | ||
1552 | connector->display_info.raw_edid = NULL; | ||
1553 | kfree(edid); | ||
1554 | } | ||
1555 | } | ||
1556 | |||
1557 | return connector_status_connected; | ||
1493 | } | 1558 | } |
1494 | 1559 | ||
1495 | static int intel_dp_get_modes(struct drm_connector *connector) | 1560 | static int intel_dp_get_modes(struct drm_connector *connector) |
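The detect split above turns intel_dp_detect into a dispatcher: PCH platforms go through the AUX-only ironlake path (where eDP is unconditionally connected), G4x keeps its hotplug-register check, and only once something is connected does the EDID, or a force_audio override, decide has_audio. The shape of that flow, with the probes stubbed:

#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED };

static enum status probe_aux(void)     { return CONNECTED; }  /* DPCD read */
static enum status probe_hotplug(void) { return CONNECTED; }  /* PORT_HOTPLUG_STAT */
static bool edid_has_audio(void)       { return true; }       /* drm_detect_monitor_audio() */

static enum status detect(bool pch_split, int force_audio, bool *has_audio)
{
    enum status s = pch_split ? probe_aux() : probe_hotplug();

    *has_audio = false;
    if (s != CONNECTED)
        return s;

    /* force_audio: <0 forces off, >0 forces on, 0 defers to the EDID */
    if (force_audio)
        *has_audio = force_audio > 0;
    else
        *has_audio = edid_has_audio();

    return CONNECTED;
}

int main(void)
{
    bool audio;
    enum status s = detect(true, 0, &audio);
    printf("status=%d audio=%d\n", s, audio);
    return 0;
}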
@@ -1504,8 +1569,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1504 | 1569 | ||
1505 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); | 1570 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); |
1506 | if (ret) { | 1571 | if (ret) { |
1507 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 1572 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { |
1508 | !dev_priv->panel_fixed_mode) { | ||
1509 | struct drm_display_mode *newmode; | 1573 | struct drm_display_mode *newmode; |
1510 | list_for_each_entry(newmode, &connector->probed_modes, | 1574 | list_for_each_entry(newmode, &connector->probed_modes, |
1511 | head) { | 1575 | head) { |
@@ -1521,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1521 | } | 1585 | } |
1522 | 1586 | ||
1523 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1587 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1524 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 1588 | if (is_edp(intel_dp)) { |
1525 | if (dev_priv->panel_fixed_mode != NULL) { | 1589 | if (dev_priv->panel_fixed_mode != NULL) { |
1526 | struct drm_display_mode *mode; | 1590 | struct drm_display_mode *mode; |
1527 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1591 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1532,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1532 | return 0; | 1596 | return 0; |
1533 | } | 1597 | } |
1534 | 1598 | ||
1599 | static int | ||
1600 | intel_dp_set_property(struct drm_connector *connector, | ||
1601 | struct drm_property *property, | ||
1602 | uint64_t val) | ||
1603 | { | ||
1604 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1605 | int ret; | ||
1606 | |||
1607 | ret = drm_connector_property_set_value(connector, property, val); | ||
1608 | if (ret) | ||
1609 | return ret; | ||
1610 | |||
1611 | if (property == intel_dp->force_audio_property) { | ||
1612 | if (val == intel_dp->force_audio) | ||
1613 | return 0; | ||
1614 | |||
1615 | intel_dp->force_audio = val; | ||
1616 | |||
1617 | if (val > 0 && intel_dp->has_audio) | ||
1618 | return 0; | ||
1619 | if (val < 0 && !intel_dp->has_audio) | ||
1620 | return 0; | ||
1621 | |||
1622 | intel_dp->has_audio = val > 0; | ||
1623 | goto done; | ||
1624 | } | ||
1625 | |||
1626 | return -EINVAL; | ||
1627 | |||
1628 | done: | ||
1629 | if (intel_dp->base.base.crtc) { | ||
1630 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | ||
1631 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
1632 | crtc->x, crtc->y, | ||
1633 | crtc->fb); | ||
1634 | } | ||
1635 | |||
1636 | return 0; | ||
1637 | } | ||
1638 | |||
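From userspace the new tri-state is an ordinary KMS connector property. Something like the following libdrm fragment would force audio on for any connector exposing it; the device path is illustrative and error handling is elided for brevity:

#include <fcntl.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Set "force_audio" = 1 (force on) on every connector that has it. */
int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);
    drmModeRes *res = drmModeGetResources(fd);
    int i, j;

    for (i = 0; i < res->count_connectors; i++) {
        drmModeConnector *c = drmModeGetConnector(fd, res->connectors[i]);
        for (j = 0; j < c->count_props; j++) {
            drmModePropertyRes *p = drmModeGetProperty(fd, c->props[j]);
            if (!strcmp(p->name, "force_audio"))
                drmModeConnectorSetProperty(fd, c->connector_id,
                                            p->prop_id, 1);
            drmModeFreeProperty(p);
        }
        drmModeFreeConnector(c);
    }
    drmModeFreeResources(res);
    return 0;
}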
1535 | static void | 1639 | static void |
1536 | intel_dp_destroy (struct drm_connector *connector) | 1640 | intel_dp_destroy (struct drm_connector *connector) |
1537 | { | 1641 | { |
@@ -1561,6 +1665,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { | |||
1561 | .dpms = drm_helper_connector_dpms, | 1665 | .dpms = drm_helper_connector_dpms, |
1562 | .detect = intel_dp_detect, | 1666 | .detect = intel_dp_detect, |
1563 | .fill_modes = drm_helper_probe_single_connector_modes, | 1667 | .fill_modes = drm_helper_probe_single_connector_modes, |
1668 | .set_property = intel_dp_set_property, | ||
1564 | .destroy = intel_dp_destroy, | 1669 | .destroy = intel_dp_destroy, |
1565 | }; | 1670 | }; |
1566 | 1671 | ||
@@ -1625,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev) | |||
1625 | return false; | 1730 | return false; |
1626 | } | 1731 | } |
1627 | 1732 | ||
1733 | static void | ||
1734 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) | ||
1735 | { | ||
1736 | struct drm_device *dev = connector->dev; | ||
1737 | |||
1738 | intel_dp->force_audio_property = | ||
1739 | drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); | ||
1740 | if (intel_dp->force_audio_property) { | ||
1741 | intel_dp->force_audio_property->values[0] = -1; | ||
1742 | intel_dp->force_audio_property->values[1] = 1; | ||
1743 | drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); | ||
1744 | } | ||
1745 | } | ||
1746 | |||
1628 | void | 1747 | void |
1629 | intel_dp_init(struct drm_device *dev, int output_reg) | 1748 | intel_dp_init(struct drm_device *dev, int output_reg) |
1630 | { | 1749 | { |
@@ -1651,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1651 | if (intel_dpd_is_edp(dev)) | 1770 | if (intel_dpd_is_edp(dev)) |
1652 | intel_dp->is_pch_edp = true; | 1771 | intel_dp->is_pch_edp = true; |
1653 | 1772 | ||
1654 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { | 1773 | if (output_reg == DP_A || is_pch_edp(intel_dp)) { |
1655 | type = DRM_MODE_CONNECTOR_eDP; | 1774 | type = DRM_MODE_CONNECTOR_eDP; |
1656 | intel_encoder->type = INTEL_OUTPUT_EDP; | 1775 | intel_encoder->type = INTEL_OUTPUT_EDP; |
1657 | } else { | 1776 | } else { |
@@ -1672,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1672 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1791 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1673 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1792 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1674 | 1793 | ||
1675 | if (IS_eDP(intel_dp)) | 1794 | if (is_edp(intel_dp)) |
1676 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1795 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1677 | 1796 | ||
1678 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1797 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
@@ -1717,9 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1717 | 1836 | ||
1718 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 1837 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
1719 | 1838 | ||
1839 | /* Cache some DPCD data in the eDP case */ | ||
1840 | if (is_edp(intel_dp)) { | ||
1841 | int ret; | ||
1842 | bool was_on; | ||
1843 | |||
1844 | was_on = ironlake_edp_panel_on(intel_dp); | ||
1845 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, | ||
1846 | intel_dp->dpcd, | ||
1847 | sizeof(intel_dp->dpcd)); | ||
1848 | if (ret == sizeof(intel_dp->dpcd)) { | ||
1849 | if (intel_dp->dpcd[0] >= 0x11) | ||
1850 | dev_priv->no_aux_handshake = intel_dp->dpcd[3] & | ||
1851 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; | ||
1852 | } else { | ||
1853 | DRM_ERROR("failed to retrieve link info\n"); | ||
1854 | } | ||
1855 | if (!was_on) | ||
1856 | ironlake_edp_panel_off(dev); | ||
1857 | } | ||
1858 | |||
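The init-time probe above keys off two DPCD fields: byte 0 is DPCD_REV (0x11 meaning DisplayPort 1.1) and byte 3 (MAX_DOWNSPREAD) carries the no-AUX-handshake capability in bit 6. A small parser over a captured four-byte DPCD header; the sample bytes are made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)  /* DPCD 0x003, bit 6 */

static bool panel_skips_aux_handshake(const uint8_t dpcd[4])
{
    /* Capability is only defined from DP 1.1 (DPCD_REV >= 0x11) onward */
    if (dpcd[0] < 0x11)
        return false;
    return dpcd[3] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
}

int main(void)
{
    const uint8_t dpcd[4] = { 0x11, 0x0a, 0x84, 0x40 };  /* sample eDP header */
    printf("no_aux_handshake: %d\n", panel_skips_aux_handshake(dpcd));
    return 0;
}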
1720 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1859 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1721 | 1860 | ||
1722 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { | 1861 | if (is_edp(intel_dp)) { |
1723 | /* initialize panel mode from VBT if available for eDP */ | 1862 | /* initialize panel mode from VBT if available for eDP */ |
1724 | if (dev_priv->lfp_lvds_vbt_mode) { | 1863 | if (dev_priv->lfp_lvds_vbt_mode) { |
1725 | dev_priv->panel_fixed_mode = | 1864 | dev_priv->panel_fixed_mode = |
@@ -1731,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1731 | } | 1870 | } |
1732 | } | 1871 | } |
1733 | 1872 | ||
1873 | intel_dp_add_properties(intel_dp, connector); | ||
1874 | |||
1734 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 1875 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
1735 | * 0xd. Failure to do so will result in spurious interrupts being | 1876 | * 0xd. Failure to do so will result in spurious interrupts being |
1736 | * generated on the port when a cable is not attached. | 1877 | * generated on the port when a cable is not attached. |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 40e99bf27ff..9af9f86a876 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -178,6 +178,38 @@ struct intel_crtc { | |||
178 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) | 178 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
179 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 179 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
180 | 180 | ||
181 | #define DIP_TYPE_AVI 0x82 | ||
182 | #define DIP_VERSION_AVI 0x2 | ||
183 | #define DIP_LEN_AVI 13 | ||
184 | |||
185 | struct dip_infoframe { | ||
186 | uint8_t type; /* HB0 */ | ||
187 | uint8_t ver; /* HB1 */ | ||
188 | uint8_t len; /* HB2 - body len, not including checksum */ | ||
189 | uint8_t ecc; /* Header ECC */ | ||
190 | uint8_t checksum; /* PB0 */ | ||
191 | union { | ||
192 | struct { | ||
193 | /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ | ||
194 | uint8_t Y_A_B_S; | ||
195 | /* PB2 - C 7:6, M 5:4, R 3:0 */ | ||
196 | uint8_t C_M_R; | ||
197 | /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ | ||
198 | uint8_t ITC_EC_Q_SC; | ||
199 | /* PB4 - VIC 6:0 */ | ||
200 | uint8_t VIC; | ||
201 | /* PB5 - PR 3:0 */ | ||
202 | uint8_t PR; | ||
203 | /* PB6 to PB13 */ | ||
204 | uint16_t top_bar_end; | ||
205 | uint16_t bottom_bar_start; | ||
206 | uint16_t left_bar_end; | ||
207 | uint16_t right_bar_start; | ||
208 | } avi; | ||
209 | uint8_t payload[27]; | ||
210 | } __attribute__ ((packed)) body; | ||
211 | } __attribute__((packed)); | ||
212 | |||
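The packed attribute is load-bearing here: 4 header bytes (type, version, length, ECC) plus the PB0 checksum and a 27-byte payload union must come out at exactly 32 bytes, which is the eight dword writes the HDMI code performs into the DIP data register. A condensed copy of the layout with the size pinned at compile time:

#include <stdint.h>

/* Condensed from the struct above; the avi member is elided since the
 * union is sized by its 27-byte payload either way. */
struct dip_frame {
    uint8_t type, ver, len, ecc;  /* HB0-HB2 + header ECC */
    uint8_t checksum;             /* PB0 */
    union {
        uint8_t payload[27];      /* PB1 onward */
    } body;
} __attribute__((packed));

/* 4 + 1 + 27 = 32 bytes = eight 32-bit writes to VIDEO_DIP_DATA */
_Static_assert(sizeof(struct dip_frame) == 32,
               "DIP buffer must be exactly eight dwords");

int main(void) { return 0; }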
181 | static inline struct drm_crtc * | 213 | static inline struct drm_crtc * |
182 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | 214 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) |
183 | { | 215 | { |
@@ -200,6 +232,7 @@ extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); | |||
200 | 232 | ||
201 | extern void intel_crt_init(struct drm_device *dev); | 233 | extern void intel_crt_init(struct drm_device *dev); |
202 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | 234 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); |
235 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | ||
203 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | 236 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
204 | extern void intel_dvo_init(struct drm_device *dev); | 237 | extern void intel_dvo_init(struct drm_device *dev); |
205 | extern void intel_tv_init(struct drm_device *dev); | 238 | extern void intel_tv_init(struct drm_device *dev); |
@@ -209,9 +242,9 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
209 | void | 242 | void |
210 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 243 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
211 | struct drm_display_mode *adjusted_mode); | 244 | struct drm_display_mode *adjusted_mode); |
212 | extern bool intel_pch_has_edp(struct drm_crtc *crtc); | ||
213 | extern bool intel_dpd_is_edp(struct drm_device *dev); | 245 | extern bool intel_dpd_is_edp(struct drm_device *dev); |
214 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 246 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
247 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); | ||
215 | 248 | ||
216 | /* intel_panel.c */ | 249 | /* intel_panel.c */ |
217 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 250 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 521622b9be7..af2a1dddc28 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -225,7 +225,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
225 | 225 | ||
226 | drm_framebuffer_cleanup(&ifb->base); | 226 | drm_framebuffer_cleanup(&ifb->base); |
227 | if (ifb->obj) { | 227 | if (ifb->obj) { |
228 | drm_gem_object_unreference(ifb->obj); | 228 | drm_gem_object_unreference_unlocked(ifb->obj); |
229 | ifb->obj = NULL; | 229 | ifb->obj = NULL; |
230 | } | 230 | } |
231 | } | 231 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9fb9501f2d0..0d0273e7b02 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -42,6 +42,9 @@ struct intel_hdmi { | |||
42 | u32 sdvox_reg; | 42 | u32 sdvox_reg; |
43 | int ddc_bus; | 43 | int ddc_bus; |
44 | bool has_hdmi_sink; | 44 | bool has_hdmi_sink; |
45 | bool has_audio; | ||
46 | int force_audio; | ||
47 | struct drm_property *force_audio_property; | ||
45 | }; | 48 | }; |
46 | 49 | ||
47 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | 50 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
@@ -55,6 +58,60 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) | |||
55 | struct intel_hdmi, base); | 58 | struct intel_hdmi, base); |
56 | } | 59 | } |
57 | 60 | ||
61 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) | ||
62 | { | ||
63 | uint8_t *data = (uint8_t *)avi_if; | ||
64 | uint8_t sum = 0; | ||
65 | unsigned i; | ||
66 | |||
67 | avi_if->checksum = 0; | ||
68 | avi_if->ecc = 0; | ||
69 | |||
70 | for (i = 0; i < sizeof(*avi_if); i++) | ||
71 | sum += data[i]; | ||
72 | |||
73 | avi_if->checksum = 0x100 - sum; | ||
74 | } | ||
75 | |||
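The checksum convention (from the CEA-861 InfoFrame definition) is that every transmitted byte, header included, sums to zero modulo 256, so a sink validates by adding the whole buffer and comparing against 0. A tiny demonstration of the arithmetic on a zero-padded AVI frame:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t frame[32] = { 0x82, 0x02, 0x0d };  /* AVI type/ver/len, rest zero */
    uint8_t sum = 0;
    size_t i;

    frame[4] = 0;                        /* checksum slot zeroed first */
    for (i = 0; i < sizeof(frame); i++)
        sum += frame[i];
    frame[4] = 0x100 - sum;              /* truncates to 0 when sum == 0 */

    for (sum = 0, i = 0; i < sizeof(frame); i++)
        sum += frame[i];
    printf("verify: sum mod 256 = %u\n", sum);  /* prints 0 */
    return 0;
}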
76 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | ||
77 | { | ||
78 | struct dip_infoframe avi_if = { | ||
79 | .type = DIP_TYPE_AVI, | ||
80 | .ver = DIP_VERSION_AVI, | ||
81 | .len = DIP_LEN_AVI, | ||
82 | }; | ||
83 | uint32_t *data = (uint32_t *)&avi_if; | ||
84 | struct drm_device *dev = encoder->dev; | ||
85 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
86 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
87 | u32 port; | ||
88 | unsigned i; | ||
89 | |||
90 | if (!intel_hdmi->has_hdmi_sink) | ||
91 | return; | ||
92 | |||
93 | /* XXX first guess at handling video port, is this correct? */ | ||
94 | if (intel_hdmi->sdvox_reg == SDVOB) | ||
95 | port = VIDEO_DIP_PORT_B; | ||
96 | else if (intel_hdmi->sdvox_reg == SDVOC) | ||
97 | port = VIDEO_DIP_PORT_C; | ||
98 | else | ||
99 | return; | ||
100 | |||
101 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | ||
102 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); | ||
103 | |||
104 | intel_dip_infoframe_csum(&avi_if); | ||
105 | for (i = 0; i < sizeof(avi_if); i += 4) { | ||
106 | I915_WRITE(VIDEO_DIP_DATA, *data); | ||
107 | data++; | ||
108 | } | ||
109 | |||
110 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | ||
111 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | | ||
112 | VIDEO_DIP_ENABLE_AVI); | ||
113 | } | ||
114 | |||
58 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | 115 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
59 | struct drm_display_mode *mode, | 116 | struct drm_display_mode *mode, |
60 | struct drm_display_mode *adjusted_mode) | 117 | struct drm_display_mode *adjusted_mode) |
@@ -72,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
72 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 129 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
73 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | 130 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; |
74 | 131 | ||
75 | if (intel_hdmi->has_hdmi_sink) { | 132 | /* Required on CPT */ |
133 | if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) | ||
134 | sdvox |= HDMI_MODE_SELECT; | ||
135 | |||
136 | if (intel_hdmi->has_audio) { | ||
76 | sdvox |= SDVO_AUDIO_ENABLE; | 137 | sdvox |= SDVO_AUDIO_ENABLE; |
77 | if (HAS_PCH_CPT(dev)) | 138 | sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; |
78 | sdvox |= HDMI_MODE_SELECT; | ||
79 | } | 139 | } |
80 | 140 | ||
81 | if (intel_crtc->pipe == 1) { | 141 | if (intel_crtc->pipe == 1) { |
@@ -87,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
87 | 147 | ||
88 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); | 148 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); |
89 | POSTING_READ(intel_hdmi->sdvox_reg); | 149 | POSTING_READ(intel_hdmi->sdvox_reg); |
150 | |||
151 | intel_hdmi_set_avi_infoframe(encoder); | ||
90 | } | 152 | } |
91 | 153 | ||
92 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | 154 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) |
@@ -154,6 +216,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
154 | enum drm_connector_status status = connector_status_disconnected; | 216 | enum drm_connector_status status = connector_status_disconnected; |
155 | 217 | ||
156 | intel_hdmi->has_hdmi_sink = false; | 218 | intel_hdmi->has_hdmi_sink = false; |
219 | intel_hdmi->has_audio = false; | ||
157 | edid = drm_get_edid(connector, | 220 | edid = drm_get_edid(connector, |
158 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | 221 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); |
159 | 222 | ||
@@ -161,11 +224,17 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
161 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 224 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
162 | status = connector_status_connected; | 225 | status = connector_status_connected; |
163 | intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 226 | intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
227 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); | ||
164 | } | 228 | } |
165 | connector->display_info.raw_edid = NULL; | 229 | connector->display_info.raw_edid = NULL; |
166 | kfree(edid); | 230 | kfree(edid); |
167 | } | 231 | } |
168 | 232 | ||
233 | if (status == connector_status_connected) { | ||
234 | if (intel_hdmi->force_audio) | ||
235 | intel_hdmi->has_audio = intel_hdmi->force_audio > 0; | ||
236 | } | ||
237 | |||
169 | return status; | 238 | return status; |
170 | } | 239 | } |
171 | 240 | ||
@@ -182,6 +251,46 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) | |||
182 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | 251 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); |
183 | } | 252 | } |
184 | 253 | ||
254 | static int | ||
255 | intel_hdmi_set_property(struct drm_connector *connector, | ||
256 | struct drm_property *property, | ||
257 | uint64_t val) | ||
258 | { | ||
259 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | ||
260 | int ret; | ||
261 | |||
262 | ret = drm_connector_property_set_value(connector, property, val); | ||
263 | if (ret) | ||
264 | return ret; | ||
265 | |||
266 | if (property == intel_hdmi->force_audio_property) { | ||
267 | if (val == intel_hdmi->force_audio) | ||
268 | return 0; | ||
269 | |||
270 | intel_hdmi->force_audio = val; | ||
271 | |||
272 | if (val > 0 && intel_hdmi->has_audio) | ||
273 | return 0; | ||
274 | if (val < 0 && !intel_hdmi->has_audio) | ||
275 | return 0; | ||
276 | |||
277 | intel_hdmi->has_audio = val > 0; | ||
278 | goto done; | ||
279 | } | ||
280 | |||
281 | return -EINVAL; | ||
282 | |||
283 | done: | ||
284 | if (intel_hdmi->base.base.crtc) { | ||
285 | struct drm_crtc *crtc = intel_hdmi->base.base.crtc; | ||
286 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
287 | crtc->x, crtc->y, | ||
288 | crtc->fb); | ||
289 | } | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | |||
185 | static void intel_hdmi_destroy(struct drm_connector *connector) | 294 | static void intel_hdmi_destroy(struct drm_connector *connector) |
186 | { | 295 | { |
187 | drm_sysfs_connector_remove(connector); | 296 | drm_sysfs_connector_remove(connector); |
@@ -201,6 +310,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | |||
201 | .dpms = drm_helper_connector_dpms, | 310 | .dpms = drm_helper_connector_dpms, |
202 | .detect = intel_hdmi_detect, | 311 | .detect = intel_hdmi_detect, |
203 | .fill_modes = drm_helper_probe_single_connector_modes, | 312 | .fill_modes = drm_helper_probe_single_connector_modes, |
313 | .set_property = intel_hdmi_set_property, | ||
204 | .destroy = intel_hdmi_destroy, | 314 | .destroy = intel_hdmi_destroy, |
205 | }; | 315 | }; |
206 | 316 | ||
@@ -214,6 +324,20 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | |||
214 | .destroy = intel_encoder_destroy, | 324 | .destroy = intel_encoder_destroy, |
215 | }; | 325 | }; |
216 | 326 | ||
327 | static void | ||
328 | intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) | ||
329 | { | ||
330 | struct drm_device *dev = connector->dev; | ||
331 | |||
332 | intel_hdmi->force_audio_property = | ||
333 | drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); | ||
334 | if (intel_hdmi->force_audio_property) { | ||
335 | intel_hdmi->force_audio_property->values[0] = -1; | ||
336 | intel_hdmi->force_audio_property->values[1] = 1; | ||
337 | drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); | ||
338 | } | ||
339 | } | ||
340 | |||
217 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 341 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
218 | { | 342 | { |
219 | struct drm_i915_private *dev_priv = dev->dev_private; | 343 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -275,6 +399,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
275 | 399 | ||
276 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); | 400 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
277 | 401 | ||
402 | intel_hdmi_add_properties(intel_hdmi, connector); | ||
403 | |||
278 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 404 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
279 | drm_sysfs_connector_add(connector); | 405 | drm_sysfs_connector_add(connector); |
280 | 406 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2449a74d4d8..2be4f728ed0 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -155,6 +155,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | |||
155 | GPIOC, | 155 | GPIOC, |
156 | GPIOD, | 156 | GPIOD, |
157 | GPIOE, | 157 | GPIOE, |
158 | 0, | ||
158 | GPIOF, | 159 | GPIOF, |
159 | }; | 160 | }; |
160 | struct intel_gpio *gpio; | 161 | struct intel_gpio *gpio; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d89b88791aa..09f2dc353ae 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -119,12 +119,12 @@ render_ring_flush(struct drm_device *dev, | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | static void ring_set_tail(struct drm_device *dev, | 122 | static void ring_write_tail(struct drm_device *dev, |
123 | struct intel_ring_buffer *ring, | 123 | struct intel_ring_buffer *ring, |
124 | u32 value) | 124 | u32 value) |
125 | { | 125 | { |
126 | drm_i915_private_t *dev_priv = dev->dev_private; | 126 | drm_i915_private_t *dev_priv = dev->dev_private; |
127 | I915_WRITE_TAIL(ring, ring->tail); | 127 | I915_WRITE_TAIL(ring, value); |
128 | } | 128 | } |
129 | 129 | ||
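The rename from set_tail to write_tail rides along with a real fix: the old body posted ring->tail and ignored its value argument, so init_ring_common's ring->set_tail(dev, ring, 0) never actually zeroed the hardware tail. A toy before/after, with the register write stubbed:

#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t tail; };

static void write_reg(uint32_t v) { printf("TAIL <- %u\n", v); }

/* Buggy: resetting to 0 never reaches the hardware. */
static void set_tail_old(struct ring *r, uint32_t value)
{
    (void)value;          /* the bug: the argument is never used */
    write_reg(r->tail);
}

/* Fixed: the caller's value is what lands in the register. */
static void write_tail_new(struct ring *r, uint32_t value)
{
    (void)r;
    write_reg(value);
}

int main(void)
{
    struct ring r = { .tail = 4096 };
    set_tail_old(&r, 0);    /* prints 4096: the reset is lost */
    write_tail_new(&r, 0);  /* prints 0 */
    return 0;
}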
130 | u32 intel_ring_get_active_head(struct drm_device *dev, | 130 | u32 intel_ring_get_active_head(struct drm_device *dev, |
@@ -148,7 +148,7 @@ static int init_ring_common(struct drm_device *dev, | |||
148 | /* Stop the ring if it's running. */ | 148 | /* Stop the ring if it's running. */ |
149 | I915_WRITE_CTL(ring, 0); | 149 | I915_WRITE_CTL(ring, 0); |
150 | I915_WRITE_HEAD(ring, 0); | 150 | I915_WRITE_HEAD(ring, 0); |
151 | ring->set_tail(dev, ring, 0); | 151 | ring->write_tail(dev, ring, 0); |
152 | 152 | ||
153 | /* Initialize the ring. */ | 153 | /* Initialize the ring. */ |
154 | I915_WRITE_START(ring, obj_priv->gtt_offset); | 154 | I915_WRITE_START(ring, obj_priv->gtt_offset); |
@@ -383,9 +383,9 @@ static int init_bsd_ring(struct drm_device *dev, | |||
383 | } | 383 | } |
384 | 384 | ||
385 | static u32 | 385 | static u32 |
386 | bsd_ring_add_request(struct drm_device *dev, | 386 | ring_add_request(struct drm_device *dev, |
387 | struct intel_ring_buffer *ring, | 387 | struct intel_ring_buffer *ring, |
388 | u32 flush_domains) | 388 | u32 flush_domains) |
389 | { | 389 | { |
390 | u32 seqno; | 390 | u32 seqno; |
391 | 391 | ||
@@ -418,18 +418,18 @@ bsd_ring_put_user_irq(struct drm_device *dev, | |||
418 | } | 418 | } |
419 | 419 | ||
420 | static u32 | 420 | static u32 |
421 | bsd_ring_get_seqno(struct drm_device *dev, | 421 | ring_status_page_get_seqno(struct drm_device *dev, |
422 | struct intel_ring_buffer *ring) | 422 | struct intel_ring_buffer *ring) |
423 | { | 423 | { |
424 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 424 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
425 | } | 425 | } |
426 | 426 | ||
427 | static int | 427 | static int |
428 | bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 428 | ring_dispatch_gem_execbuffer(struct drm_device *dev, |
429 | struct intel_ring_buffer *ring, | 429 | struct intel_ring_buffer *ring, |
430 | struct drm_i915_gem_execbuffer2 *exec, | 430 | struct drm_i915_gem_execbuffer2 *exec, |
431 | struct drm_clip_rect *cliprects, | 431 | struct drm_clip_rect *cliprects, |
432 | uint64_t exec_offset) | 432 | uint64_t exec_offset) |
433 | { | 433 | { |
434 | uint32_t exec_start; | 434 | uint32_t exec_start; |
435 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 435 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
@@ -441,7 +441,6 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | |||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
443 | 443 | ||
444 | |||
445 | static int | 444 | static int |
446 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 445 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, |
447 | struct intel_ring_buffer *ring, | 446 | struct intel_ring_buffer *ring, |
@@ -476,7 +475,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | |||
476 | intel_ring_emit(dev, ring, exec_start + exec_len - 4); | 475 | intel_ring_emit(dev, ring, exec_start + exec_len - 4); |
477 | intel_ring_emit(dev, ring, 0); | 476 | intel_ring_emit(dev, ring, 0); |
478 | } else { | 477 | } else { |
479 | intel_ring_begin(dev, ring, 4); | 478 | intel_ring_begin(dev, ring, 2); |
480 | if (INTEL_INFO(dev)->gen >= 4) { | 479 | if (INTEL_INFO(dev)->gen >= 4) { |
481 | intel_ring_emit(dev, ring, | 480 | intel_ring_emit(dev, ring, |
482 | MI_BATCH_BUFFER_START | (2 << 6) | 481 | MI_BATCH_BUFFER_START | (2 << 6) |
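The 4-to-2 change above fixes the emit accounting: intel_ring_begin reserves a dword count up front, and the MI_BATCH_BUFFER_START sequence writes exactly two dwords, so reserving four can leave the software space bookkeeping out of step with the tail actually posted. A toy model of the begin/emit/advance contract:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t buf[64]; unsigned tail, reserved, emitted; };

static void ring_begin(struct ring *r, unsigned dwords)
{ r->reserved = dwords; r->emitted = 0; }

static void ring_emit(struct ring *r, uint32_t dw)
{ r->buf[r->tail++ % 64] = dw; r->emitted++; }

static void ring_advance(struct ring *r)
{
    /* Every reserved dword must actually have been written, or the
     * space accounting drifts away from the posted tail. */
    assert(r->emitted == r->reserved);
    printf("posted %u dwords\n", r->emitted);
}

int main(void)
{
    struct ring r = { {0}, 0, 0, 0 };
    ring_begin(&r, 2);          /* was 4: one unwritten pair too many */
    ring_emit(&r, 0x18800080);  /* MI_BATCH_BUFFER_START | (2 << 6) */
    ring_emit(&r, 0x1000);      /* batch address */
    ring_advance(&r);
    return 0;
}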
@@ -492,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | |||
492 | intel_ring_advance(dev, ring); | 491 | intel_ring_advance(dev, ring); |
493 | } | 492 | } |
494 | 493 | ||
495 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { | 494 | if (IS_G4X(dev) || IS_GEN5(dev)) { |
496 | intel_ring_begin(dev, ring, 2); | 495 | intel_ring_begin(dev, ring, 2); |
497 | intel_ring_emit(dev, ring, MI_FLUSH | | 496 | intel_ring_emit(dev, ring, MI_FLUSH | |
498 | MI_NO_WRITE_FLUSH | | 497 | MI_NO_WRITE_FLUSH | |
@@ -581,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
581 | ring->dev = dev; | 580 | ring->dev = dev; |
582 | INIT_LIST_HEAD(&ring->active_list); | 581 | INIT_LIST_HEAD(&ring->active_list); |
583 | INIT_LIST_HEAD(&ring->request_list); | 582 | INIT_LIST_HEAD(&ring->request_list); |
583 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
584 | 584 | ||
585 | if (I915_NEED_GFX_HWS(dev)) { | 585 | if (I915_NEED_GFX_HWS(dev)) { |
586 | ret = init_status_page(dev, ring); | 586 | ret = init_status_page(dev, ring); |
@@ -707,7 +707,7 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
707 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 707 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
708 | } | 708 | } |
709 | 709 | ||
710 | yield(); | 710 | msleep(1); |
711 | } while (!time_after(jiffies, end)); | 711 | } while (!time_after(jiffies, end)); |
712 | trace_i915_ring_wait_end (dev); | 712 | trace_i915_ring_wait_end (dev); |
713 | return -EBUSY; | 713 | return -EBUSY; |
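Swapping yield() for msleep(1) turns a scheduler-dependent busy spin into an actual sleep while keeping the jiffies-style deadline loop. The same polling idiom in portable userspace C (toy condition, 1 ms sleep standing in for msleep):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool ring_has_space(void) { static int n; return ++n > 3; }  /* toy condition */

/* Poll with a 1 ms sleep per iteration instead of burning CPU. */
static int wait_for_space(int timeout_ms)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    long end_ms = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L + timeout_ms;

    do {
        if (ring_has_space())
            return 0;
        usleep(1000);  /* the msleep(1) equivalent */
        clock_gettime(CLOCK_MONOTONIC, &ts);
    } while (ts.tv_sec * 1000L + ts.tv_nsec / 1000000L < end_ms);

    return -1;  /* -EBUSY in the kernel version */
}

int main(void) { printf("%d\n", wait_for_space(3000)); return 0; }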
@@ -730,22 +730,7 @@ void intel_ring_advance(struct drm_device *dev, | |||
730 | struct intel_ring_buffer *ring) | 730 | struct intel_ring_buffer *ring) |
731 | { | 731 | { |
732 | ring->tail &= ring->size - 1; | 732 | ring->tail &= ring->size - 1; |
733 | ring->set_tail(dev, ring, ring->tail); | 733 | ring->write_tail(dev, ring, ring->tail); |
734 | } | ||
735 | |||
736 | void intel_fill_struct(struct drm_device *dev, | ||
737 | struct intel_ring_buffer *ring, | ||
738 | void *data, | ||
739 | unsigned int len) | ||
740 | { | ||
741 | unsigned int *virt = ring->virtual_start + ring->tail; | ||
742 | BUG_ON((len&~(4-1)) != 0); | ||
743 | intel_ring_begin(dev, ring, len/4); | ||
744 | memcpy(virt, data, len); | ||
745 | ring->tail += len; | ||
746 | ring->tail &= ring->size - 1; | ||
747 | ring->space -= len; | ||
748 | intel_ring_advance(dev, ring); | ||
749 | } | 734 | } |
750 | 735 | ||
751 | static const struct intel_ring_buffer render_ring = { | 736 | static const struct intel_ring_buffer render_ring = { |
@@ -754,7 +739,7 @@ static const struct intel_ring_buffer render_ring = { | |||
754 | .mmio_base = RENDER_RING_BASE, | 739 | .mmio_base = RENDER_RING_BASE, |
755 | .size = 32 * PAGE_SIZE, | 740 | .size = 32 * PAGE_SIZE, |
756 | .init = init_render_ring, | 741 | .init = init_render_ring, |
757 | .set_tail = ring_set_tail, | 742 | .write_tail = ring_write_tail, |
758 | .flush = render_ring_flush, | 743 | .flush = render_ring_flush, |
759 | .add_request = render_ring_add_request, | 744 | .add_request = render_ring_add_request, |
760 | .get_seqno = render_ring_get_seqno, | 745 | .get_seqno = render_ring_get_seqno, |
@@ -771,19 +756,19 @@ static const struct intel_ring_buffer bsd_ring = { | |||
771 | .mmio_base = BSD_RING_BASE, | 756 | .mmio_base = BSD_RING_BASE, |
772 | .size = 32 * PAGE_SIZE, | 757 | .size = 32 * PAGE_SIZE, |
773 | .init = init_bsd_ring, | 758 | .init = init_bsd_ring, |
774 | .set_tail = ring_set_tail, | 759 | .write_tail = ring_write_tail, |
775 | .flush = bsd_ring_flush, | 760 | .flush = bsd_ring_flush, |
776 | .add_request = bsd_ring_add_request, | 761 | .add_request = ring_add_request, |
777 | .get_seqno = bsd_ring_get_seqno, | 762 | .get_seqno = ring_status_page_get_seqno, |
778 | .user_irq_get = bsd_ring_get_user_irq, | 763 | .user_irq_get = bsd_ring_get_user_irq, |
779 | .user_irq_put = bsd_ring_put_user_irq, | 764 | .user_irq_put = bsd_ring_put_user_irq, |
780 | .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, | 765 | .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, |
781 | }; | 766 | }; |
782 | 767 | ||
783 | 768 | ||
784 | static void gen6_bsd_ring_set_tail(struct drm_device *dev, | 769 | static void gen6_bsd_ring_write_tail(struct drm_device *dev, |
785 | struct intel_ring_buffer *ring, | 770 | struct intel_ring_buffer *ring, |
786 | u32 value) | 771 | u32 value) |
787 | { | 772 | { |
788 | drm_i915_private_t *dev_priv = dev->dev_private; | 773 | drm_i915_private_t *dev_priv = dev->dev_private; |
789 | 774 | ||
@@ -804,10 +789,10 @@ static void gen6_bsd_ring_set_tail(struct drm_device *dev, | |||
804 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | 789 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
805 | } | 790 | } |
806 | 791 | ||
807 | static void gen6_bsd_ring_flush(struct drm_device *dev, | 792 | static void gen6_ring_flush(struct drm_device *dev, |
808 | struct intel_ring_buffer *ring, | 793 | struct intel_ring_buffer *ring, |
809 | u32 invalidate_domains, | 794 | u32 invalidate_domains, |
810 | u32 flush_domains) | 795 | u32 flush_domains) |
811 | { | 796 | { |
812 | intel_ring_begin(dev, ring, 4); | 797 | intel_ring_begin(dev, ring, 4); |
813 | intel_ring_emit(dev, ring, MI_FLUSH_DW); | 798 | intel_ring_emit(dev, ring, MI_FLUSH_DW); |
@@ -818,11 +803,11 @@ static void gen6_bsd_ring_flush(struct drm_device *dev, | |||
818 | } | 803 | } |
819 | 804 | ||
820 | static int | 805 | static int |
821 | gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 806 | gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, |
822 | struct intel_ring_buffer *ring, | 807 | struct intel_ring_buffer *ring, |
823 | struct drm_i915_gem_execbuffer2 *exec, | 808 | struct drm_i915_gem_execbuffer2 *exec, |
824 | struct drm_clip_rect *cliprects, | 809 | struct drm_clip_rect *cliprects, |
825 | uint64_t exec_offset) | 810 | uint64_t exec_offset) |
826 | { | 811 | { |
827 | uint32_t exec_start; | 812 | uint32_t exec_start; |
828 | 813 | ||
@@ -845,13 +830,43 @@ static const struct intel_ring_buffer gen6_bsd_ring = { | |||
845 | .mmio_base = GEN6_BSD_RING_BASE, | 830 | .mmio_base = GEN6_BSD_RING_BASE, |
846 | .size = 32 * PAGE_SIZE, | 831 | .size = 32 * PAGE_SIZE, |
847 | .init = init_bsd_ring, | 832 | .init = init_bsd_ring, |
848 | .set_tail = gen6_bsd_ring_set_tail, | 833 | .write_tail = gen6_bsd_ring_write_tail, |
849 | .flush = gen6_bsd_ring_flush, | 834 | .flush = gen6_ring_flush, |
850 | .add_request = bsd_ring_add_request, | 835 | .add_request = ring_add_request, |
851 | .get_seqno = bsd_ring_get_seqno, | 836 | .get_seqno = ring_status_page_get_seqno, |
852 | .user_irq_get = bsd_ring_get_user_irq, | 837 | .user_irq_get = bsd_ring_get_user_irq, |
853 | .user_irq_put = bsd_ring_put_user_irq, | 838 | .user_irq_put = bsd_ring_put_user_irq, |
854 | .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer, | 839 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, |
840 | }; | ||
841 | |||
842 | /* Blitter support (SandyBridge+) */ | ||
843 | |||
844 | static void | ||
845 | blt_ring_get_user_irq(struct drm_device *dev, | ||
846 | struct intel_ring_buffer *ring) | ||
847 | { | ||
848 | /* do nothing */ | ||
849 | } | ||
850 | static void | ||
851 | blt_ring_put_user_irq(struct drm_device *dev, | ||
852 | struct intel_ring_buffer *ring) | ||
853 | { | ||
854 | /* do nothing */ | ||
855 | } | ||
856 | |||
857 | static const struct intel_ring_buffer gen6_blt_ring = { | ||
858 | .name = "blt ring", | ||
859 | .id = RING_BLT, | ||
860 | .mmio_base = BLT_RING_BASE, | ||
861 | .size = 32 * PAGE_SIZE, | ||
862 | .init = init_ring_common, | ||
863 | .write_tail = ring_write_tail, | ||
864 | .flush = gen6_ring_flush, | ||
865 | .add_request = ring_add_request, | ||
866 | .get_seqno = ring_status_page_get_seqno, | ||
867 | .user_irq_get = blt_ring_get_user_irq, | ||
868 | .user_irq_put = blt_ring_put_user_irq, | ||
869 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, | ||
855 | }; | 870 | }; |
856 | 871 | ||
857 | int intel_init_render_ring_buffer(struct drm_device *dev) | 872 | int intel_init_render_ring_buffer(struct drm_device *dev) |
@@ -881,3 +896,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) | |||
881 | 896 | ||
882 | return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); | 897 | return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); |
883 | } | 898 | } |
899 | |||
900 | int intel_init_blt_ring_buffer(struct drm_device *dev) | ||
901 | { | ||
902 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
903 | |||
904 | dev_priv->blt_ring = gen6_blt_ring; | ||
905 | |||
906 | return intel_init_ring_buffer(dev, &dev_priv->blt_ring); | ||
907 | } | ||
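Each engine is described by a static const template whose function pointers select gen-specific behaviour, and intel_init_blt_ring_buffer simply copies the template into dev_priv before the common init runs. A reduced model of that descriptor pattern (the MMIO base here is illustrative; the real BLT_RING_BASE lives in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

struct ring {
    const char *name;
    uint32_t mmio_base;
    void (*write_tail)(struct ring *r, uint32_t value);
    void (*flush)(struct ring *r);
};

static void generic_write_tail(struct ring *r, uint32_t v)
{ printf("%s: tail <- %u\n", r->name, v); }

static void gen6_flush(struct ring *r)
{ printf("%s: MI_FLUSH_DW\n", r->name); }

/* Static template, copied per device like gen6_blt_ring is. */
static const struct ring blt_ring_template = {
    .name       = "blt ring",
    .mmio_base  = 0x22000,  /* illustrative value */
    .write_tail = generic_write_tail,
    .flush      = gen6_flush,
};

int main(void)
{
    struct ring ring = blt_ring_template;  /* dev_priv->blt_ring = gen6_blt_ring; */
    ring.write_tail(&ring, 64);
    ring.flush(&ring);
    return 0;
}

Swapping behaviour by swapping pointers in the template is what lets the gen6 BSD and blitter rings share ring_add_request and gen6_ring_flush while keeping their own tail-write quirks.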
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9725f783db2..a05aff0e576 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -22,6 +22,7 @@ struct intel_ring_buffer { | |||
22 | enum intel_ring_id { | 22 | enum intel_ring_id { |
23 | RING_RENDER = 0x1, | 23 | RING_RENDER = 0x1, |
24 | RING_BSD = 0x2, | 24 | RING_BSD = 0x2, |
25 | RING_BLT = 0x4, | ||
25 | } id; | 26 | } id; |
26 | u32 mmio_base; | 27 | u32 mmio_base; |
27 | unsigned long size; | 28 | unsigned long size; |
@@ -45,9 +46,9 @@ struct intel_ring_buffer { | |||
45 | int (*init)(struct drm_device *dev, | 46 | int (*init)(struct drm_device *dev, |
46 | struct intel_ring_buffer *ring); | 47 | struct intel_ring_buffer *ring); |
47 | 48 | ||
48 | void (*set_tail)(struct drm_device *dev, | 49 | void (*write_tail)(struct drm_device *dev, |
49 | struct intel_ring_buffer *ring, | 50 | struct intel_ring_buffer *ring, |
50 | u32 value); | 51 | u32 value); |
51 | void (*flush)(struct drm_device *dev, | 52 | void (*flush)(struct drm_device *dev, |
52 | struct intel_ring_buffer *ring, | 53 | struct intel_ring_buffer *ring, |
53 | u32 invalidate_domains, | 54 | u32 invalidate_domains, |
@@ -82,6 +83,15 @@ struct intel_ring_buffer { | |||
82 | struct list_head request_list; | 83 | struct list_head request_list; |
83 | 84 | ||
84 | /** | 85 | /** |
86 | * List of objects currently pending a GPU write flush. | ||
87 | * | ||
88 | * All elements on this list will belong to either the | ||
89 | * active_list or flushing_list, last_rendering_seqno can | ||
90 | * be used to differentiate between the two elements. | ||
91 | */ | ||
92 | struct list_head gpu_write_list; | ||
93 | |||
94 | /** | ||
85 | * Do we have some not yet emitted requests outstanding? | 95 | * Do we have some not yet emitted requests outstanding? |
86 | */ | 96 | */ |
87 | bool outstanding_lazy_request; | 97 | bool outstanding_lazy_request; |
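An object can sit on a ring's active or flushing list and on its gpu_write_list at the same time because each membership has its own embedded node; that is what lets the new per-ring write list coexist with the global tracking. A minimal model of intrusive multi-list membership:

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_add(struct node *n, struct node *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

/* One object, two independent memberships: mirrors a GEM object being
 * on a ring's active_list and its gpu_write_list at once. */
struct obj {
    struct node active;     /* link into ring->active_list */
    struct node gpu_write;  /* link into ring->gpu_write_list */
};

int main(void)
{
    struct node active_list, gpu_write_list;
    struct obj o;

    list_init(&active_list);
    list_init(&gpu_write_list);
    list_add(&o.active, &active_list);
    list_add(&o.gpu_write, &gpu_write_list);
    printf("on both lists: %d\n",
           active_list.next == &o.active && gpu_write_list.next == &o.gpu_write);
    return 0;
}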
@@ -116,10 +126,6 @@ static inline void intel_ring_emit(struct drm_device *dev, | |||
116 | ring->tail += 4; | 126 | ring->tail += 4; |
117 | } | 127 | } |
118 | 128 | ||
119 | void intel_fill_struct(struct drm_device *dev, | ||
120 | struct intel_ring_buffer *ring, | ||
121 | void *data, | ||
122 | unsigned int len); | ||
123 | void intel_ring_advance(struct drm_device *dev, | 129 | void intel_ring_advance(struct drm_device *dev, |
124 | struct intel_ring_buffer *ring); | 130 | struct intel_ring_buffer *ring); |
125 | 131 | ||
@@ -128,6 +134,7 @@ u32 intel_ring_get_seqno(struct drm_device *dev, | |||
128 | 134 | ||
129 | int intel_init_render_ring_buffer(struct drm_device *dev); | 135 | int intel_init_render_ring_buffer(struct drm_device *dev); |
130 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | 136 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
137 | int intel_init_blt_ring_buffer(struct drm_device *dev); | ||
131 | 138 | ||
132 | u32 intel_ring_get_active_head(struct drm_device *dev, | 139 | u32 intel_ring_get_active_head(struct drm_device *dev, |
133 | struct intel_ring_buffer *ring); | 140 | struct intel_ring_buffer *ring); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a84224f3760..de158b76bcd 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -107,6 +107,7 @@ struct intel_sdvo {
 	 * This is set if we treat the device as HDMI, instead of DVI.
 	 */
 	bool is_hdmi;
+	bool has_audio;
 
 	/**
 	 * This is set if we detect output of sdvo device as LVDS and
@@ -119,12 +120,6 @@ struct intel_sdvo {
 	 */
 	struct drm_display_mode *sdvo_lvds_fixed_mode;
 
-	/*
-	 * supported encoding mode, used to determine whether HDMI is
-	 * supported
-	 */
-	struct intel_sdvo_encode encode;
-
 	/* DDC bus used by this SDVO encoder */
 	uint8_t ddc_bus;
 
@@ -138,11 +133,15 @@ struct intel_sdvo_connector {
 	/* Mark the type of connector */
 	uint16_t output_flag;
 
+	int force_audio;
+
 	/* This contains all current supported TV format */
 	u8 tv_format_supported[TV_FORMAT_NUM];
 	int format_supported_num;
 	struct drm_property *tv_format;
 
+	struct drm_property *force_audio_property;
+
 	/* add the property for the SDVO-TV */
 	struct drm_property *left;
 	struct drm_property *right;
@@ -794,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
-				       struct intel_sdvo_encode *encode)
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
 {
-	if (intel_sdvo_get_value(intel_sdvo,
-				 SDVO_CMD_GET_SUPP_ENCODE,
-				 encode, sizeof(*encode)))
-		return true;
+	struct intel_sdvo_encode encode;
 
-	/* non-support means DVI */
-	memset(encode, 0, sizeof(*encode));
-	return false;
+	return intel_sdvo_get_value(intel_sdvo,
+				    SDVO_CMD_GET_SUPP_ENCODE,
+				    &encode, sizeof(encode));
 }
 
 static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -849,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 }
 #endif
 
-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
-				    int index,
-				    uint8_t *data, int8_t size, uint8_t tx_rate)
-{
-	uint8_t set_buf_index[2];
-
-	set_buf_index[0] = index;
-	set_buf_index[1] = 0;
-
-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
-				  set_buf_index, 2))
-		return false;
-
-	for (; size > 0; size -= 8) {
-		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
-			return false;
-
-		data += 8;
-	}
-
-	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
-}
-
-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
-{
-	uint8_t csum = 0;
-	int i;
-
-	for (i = 0; i < size; i++)
-		csum += data[i];
-
-	return 0x100 - csum;
-}
-
-#define DIP_TYPE_AVI 0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI 13
-
-struct dip_infoframe {
-	uint8_t type;
-	uint8_t version;
-	uint8_t len;
-	uint8_t checksum;
-	union {
-		struct {
-			/* Packet Byte #1 */
-			uint8_t S:2;
-			uint8_t B:2;
-			uint8_t A:1;
-			uint8_t Y:2;
-			uint8_t rsvd1:1;
-			/* Packet Byte #2 */
-			uint8_t R:4;
-			uint8_t M:2;
-			uint8_t C:2;
-			/* Packet Byte #3 */
-			uint8_t SC:2;
-			uint8_t Q:2;
-			uint8_t EC:3;
-			uint8_t ITC:1;
-			/* Packet Byte #4 */
-			uint8_t VIC:7;
-			uint8_t rsvd2:1;
-			/* Packet Byte #5 */
-			uint8_t PR:4;
-			uint8_t rsvd3:4;
-			/* Packet Byte #6~13 */
-			uint16_t top_bar_end;
-			uint16_t bottom_bar_start;
-			uint16_t left_bar_end;
-			uint16_t right_bar_start;
-		} avi;
-		struct {
-			/* Packet Byte #1 */
-			uint8_t channel_count:3;
-			uint8_t rsvd1:1;
-			uint8_t coding_type:4;
-			/* Packet Byte #2 */
-			uint8_t sample_size:2; /* SS0, SS1 */
-			uint8_t sample_frequency:3;
-			uint8_t rsvd2:3;
-			/* Packet Byte #3 */
-			uint8_t coding_type_private:5;
-			uint8_t rsvd3:3;
-			/* Packet Byte #4 */
-			uint8_t channel_allocation;
-			/* Packet Byte #5 */
-			uint8_t rsvd4:3;
-			uint8_t level_shift:4;
-			uint8_t downmix_inhibit:1;
-		} audio;
-		uint8_t payload[28];
-	} __attribute__ ((packed)) u;
-} __attribute__((packed));
-
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-					 struct drm_display_mode * mode)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
 {
 	struct dip_infoframe avi_if = {
 		.type = DIP_TYPE_AVI,
-		.version = DIP_VERSION_AVI,
+		.ver = DIP_VERSION_AVI,
 		.len = DIP_LEN_AVI,
 	};
+	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+	uint8_t set_buf_index[2] = { 1, 0 };
+	uint64_t *data = (uint64_t *)&avi_if;
+	unsigned i;
+
+	intel_dip_infoframe_csum(&avi_if);
+
+	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+				  set_buf_index, 2))
+		return false;
 
-	avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
-						    4 + avi_if.len);
-	return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
-				       4 + avi_if.len,
-				       SDVO_HBUF_TX_VSYNC);
+	for (i = 0; i < sizeof(avi_if); i += 8) {
+		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+					  data, 8))
+			return false;
+		data++;
+	}
+
+	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+				    &tx_rate, 1);
 }
 
 static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
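The rewritten helper leans on the shared intel_dip_infoframe_csum() instead
of the removed local intel_sdvo_calc_hbuf_csum(), then streams the whole
frame to header buffer 1 in 8-byte chunks. The checksum rule itself does not
change: the checksum byte must make the header plus payload sum to zero mod
256. A standalone illustration of that rule, in plain C rather than driver
code, mirroring the arithmetic of the removed function:

	#include <stdint.h>
	#include <stddef.h>

	/* Returns the byte that makes data[0..size-1] plus the checksum
	 * sum to 0 (mod 256) -- the same arithmetic the removed
	 * intel_sdvo_calc_hbuf_csum() performed. */
	uint8_t dip_checksum(const uint8_t *data, size_t size)
	{
		uint8_t sum = 0;
		size_t i;

		for (i = 0; i < size; i++)
			sum += data[i];

		return (uint8_t)(0x100 - sum);
	}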
@@ -1111,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 		return;
 
 	if (intel_sdvo->is_hdmi &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
 		return;
 
 	if (intel_sdvo->is_tv &&
@@ -1150,7 +1063,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	}
 	if (intel_crtc->pipe == 1)
 		sdvox |= SDVO_PIPE_B_SELECT;
-	if (intel_sdvo->is_hdmi)
+	if (intel_sdvo->has_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -1476,11 +1389,18 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 			status = connector_status_connected;
 			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
+			intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
 		}
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
+	if (status == connector_status_connected) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		if (intel_sdvo_connector->force_audio)
+			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+	}
+
 	return status;
 }
 
@@ -1787,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
 	if (ret)
 		return ret;
 
+	if (property == intel_sdvo_connector->force_audio_property) {
+		if (val == intel_sdvo_connector->force_audio)
+			return 0;
+
+		intel_sdvo_connector->force_audio = val;
+
+		if (val > 0 && intel_sdvo->has_audio)
+			return 0;
+		if (val < 0 && !intel_sdvo->has_audio)
+			return 0;
+
+		intel_sdvo->has_audio = val > 0;
+		goto done;
+	}
+
 #define CHECK_PROPERTY(name, NAME) \
 	if (intel_sdvo_connector->name == property) { \
 		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
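The force_audio handling gives the connector a tri-state override: -1 forces
audio off, 1 forces it on, and 0 (the default) follows what the EDID
reported. Note that the handler only falls through to done (triggering a
full mode change) when the override actually flips the effective audio
state. A hypothetical userspace sketch using libdrm, assuming the caller has
already looked the property id up from the connector's property list (for
example by scanning drmModeGetConnector()'s props for one named
"force_audio"):

	#include <stdint.h>
	#include <xf86drmMode.h>

	/* fd: open DRM device node; connector_id and prop_id are
	 * discovered by the caller as described above. */
	int force_sdvo_audio_on(int fd, uint32_t connector_id, uint32_t prop_id)
	{
		/* 1 = force audio on, -1 = force off, 0 = follow EDID */
		return drmModeConnectorSetProperty(fd, connector_id, prop_id, 1);
	}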
@@ -2013,12 +1948,22 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	return intel_sdvo_set_target_output(intel_sdvo,
-					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
-		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
-				     &intel_sdvo->is_hdmi, 1);
+	int is_hdmi;
+
+	if (!intel_sdvo_check_supp_encode(intel_sdvo))
+		return false;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
+		return false;
+
+	is_hdmi = 0;
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
+		return false;
+
+	return !!is_hdmi;
 }
 
 static u8
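The refactored probe reads the one-byte GET_ENCODE reply into a zeroed local
int and normalizes it with !!, instead of writing the reply straight into
the persistent is_hdmi flag; the caller now decides what to do with the
answer. The zeroing matters because only one of the int's bytes is written
by the reply. The pattern in isolation, where read_reply_byte() is a
hypothetical stand-in for the one-byte intel_sdvo_get_value() read:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical one-byte reply read; stands in for
	 * intel_sdvo_get_value(..., &is_hdmi, 1) in the diff. */
	bool read_reply_byte(uint8_t *out);

	static bool probe_is_hdmi(void)
	{
		int is_hdmi = 0;	/* clear all bytes: only one gets written */

		if (!read_reply_byte((uint8_t *)&is_hdmi))
			return false;

		return !!is_hdmi;	/* normalize the raw byte to a strict bool */
	}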
@@ -2078,6 +2023,21 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 	drm_sysfs_connector_add(&connector->base.base);
 }
 
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+	struct drm_device *dev = connector->base.base.dev;
+
+	connector->force_audio_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+	if (connector->force_audio_property) {
+		connector->force_audio_property->values[0] = -1;
+		connector->force_audio_property->values[1] = 1;
+		drm_connector_attach_property(&connector->base.base,
+					      connector->force_audio_property, 0);
+	}
+}
+
 static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
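intel_sdvo_add_hdmi_properties() registers the override with the core
property machinery: drm_property_create() with DRM_MODE_PROP_RANGE and two
values yields a range property whose values[0]/values[1] are the inclusive
minimum and maximum (here -1..1), and drm_connector_attach_property()
exposes it on the connector with an initial value of 0, i.e. follow the
EDID. The same pattern generalized to any tri-state connector override, as
a sketch only (names are illustrative; error handling follows the diff):

	static void attach_tristate_property(struct drm_connector *connector,
					     struct drm_property **prop,
					     const char *name)
	{
		*prop = drm_property_create(connector->dev,
					    DRM_MODE_PROP_RANGE, name, 2);
		if (!*prop)
			return;

		(*prop)->values[0] = -1;	/* inclusive minimum */
		(*prop)->values[1] = 1;		/* inclusive maximum */
		drm_connector_attach_property(connector, *prop, 0);
	}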
@@ -2104,20 +2064,21 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-	if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
-	    && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
-	    && intel_sdvo->is_hdmi) {
+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
 		/* enable hdmi encoding mode if supported */
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				       (1 << INTEL_ANALOG_CLONE_BIT));
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
+	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+
 	return true;
 }
 