author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 08:54:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 08:54:23 -0400
commit     37be944a0270402f9cda291a930b0286f6dc92f5 (patch)
tree       6a91a9eb86450f4a18a8871f04a1ef810e7b55d6 /drivers/gpu/drm
parent     ca836a25435ef1b9914840ed0a310c9b6ac261d1 (diff)
parent     1717c0e23f411147490c7a3312b894f0ea9a5fb1 (diff)
Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux
* 'drm-core-next' of git://people.freedesktop.org/~airlied/linux: (290 commits)
Revert "drm/ttm: add a way to bo_wait for either the last read or last write"
Revert "drm/radeon/kms: add a new gem_wait ioctl with read/write flags"
vmwgfx: Don't pass unused arguments to do_dirty functions
vmwgfx: Emulate depth 32 framebuffers
drm/radeon: Lower the severity of the radeon lockup messages.
drm/i915/dp: Fix eDP on PCH DP on CPT/PPT
drm/i915/dp: Introduce is_cpu_edp()
drm/i915: use correct SPD type value
drm/i915: fix ILK+ infoframe support
drm/i915: add DP test request handling
drm/i915: read full receiver capability field during DP hot plug
drm/i915/dp: Remove eDP special cases from bandwidth checks
drm/i915/dp: Fix the math in intel_dp_link_required
drm/i915/panel: Always record the backlight level again (but cleverly)
i915: Move i915_read/write out of line
drm/i915: remove transcoder PLL mashing from mode_set per specs
drm/i915: if transcoder disable fails, say which
drm/i915: set watermarks for third pipe on IVB
drm/i915: export a CPT mode set verification function
drm/i915: fix transcoder PLL select masking
...
Diffstat (limited to 'drivers/gpu/drm')
185 files changed, 18166 insertions, 5482 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b493663c7ba7..785127cb281b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -158,3 +158,7 @@ config DRM_SAVAGE
 	help
 	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
 	  chipset. If M is selected the module will be called savage.
+
+source "drivers/gpu/drm/exynos/Kconfig"
+
+source "drivers/gpu/drm/vmwgfx/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 89cf05a72d1c..c0496f660707 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -35,4 +35,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)	+=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f88a9b2c977b..f2366440b738 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -372,11 +372,13 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 		encoder_funcs = encoder->helper_private;
 		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
 						      adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
 			goto done;
 		}
 	}
 
 	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
 		goto done;
 	}
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d2668a50872..b9dc2629ea9a 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -107,11 +107,8 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 					  root, tmp, &drm_debugfs_fops);
 		if (!ent) {
-			char name[64];
-			strncpy(name, root->d_name.name,
-						min(root->d_name.len, 64U));
 			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
-				  name, files[i].name);
+				  root->d_name.name, files[i].name);
 			kfree(tmp);
 			ret = -1;
 			goto fail;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 93a112d45c1a..7a87e0878f30 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -438,6 +438,8 @@ long drm_ioctl(struct file *filp,
 				goto err_i1;
 			}
 		}
+		if (asize > usize)
+			memset(kdata + usize, 0, asize - usize);
 	}
 
 	if (cmd & IOC_IN) {
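The two added lines close a potential information leak: when the kernel-side copy of an ioctl argument struct (asize) is larger than what userspace actually passed (usize), the tail of the buffer would otherwise keep whatever bytes were left on the heap. A minimal user-space sketch of the same pattern, with all names hypothetical:

#include <stdlib.h>
#include <string.h>

/* Copy usize caller bytes into an asize-byte buffer, zero-filling the
 * tail so newly added struct fields read as 0 rather than stale data. */
static void *grow_ioctl_args(const void *udata, size_t usize, size_t asize)
{
        char *kdata = malloc(asize);

        if (!kdata)
                return NULL;
        memcpy(kdata, udata, usize);
        if (asize > usize)
                memset(kdata + usize, 0, asize - usize);
        return kdata;
}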
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7425e5c9bd75..fe39c3570538 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1319,6 +1319,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK	0x01
 #define VENDOR_BLOCK	0x03
+#define SPEAKER_BLOCK	0x04
 #define EDID_BASIC_AUDIO	(1 << 6)
 
 /**
@@ -1347,6 +1348,176 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+{
+	connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+
+	connector->dvi_dual = db[6] & 1;
+	connector->max_tmds_clock = db[7] * 5;
+
+	connector->latency_present[0] = db[8] >> 7;
+	connector->latency_present[1] = (db[8] >> 6) & 1;
+	connector->video_latency[0] = db[9];
+	connector->audio_latency[0] = db[10];
+	connector->video_latency[1] = db[11];
+	connector->audio_latency[1] = db[12];
+
+	DRM_LOG_KMS("HDMI: DVI dual %d, "
+		    "max TMDS clock %d, "
+		    "latency present %d %d, "
+		    "video latency %d %d, "
+		    "audio latency %d %d\n",
+		    connector->dvi_dual,
+		    connector->max_tmds_clock,
+		    (int) connector->latency_present[0],
+		    (int) connector->latency_present[1],
+		    connector->video_latency[0],
+		    connector->video_latency[1],
+		    connector->audio_latency[0],
+		    connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+		*(u8 **)data = t->data.other_data.data.str.str;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+	uint8_t *eld = connector->eld;
+	u8 *cea;
+	u8 *name;
+	u8 *db;
+	int sad_count = 0;
+	int mnl;
+	int dbl;
+
+	memset(eld, 0, sizeof(connector->eld));
+
+	cea = drm_find_cea_extension(edid);
+	if (!cea) {
+		DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+		return;
+	}
+
+	name = NULL;
+	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+	for (mnl = 0; name && mnl < 13; mnl++) {
+		if (name[mnl] == 0x0a)
+			break;
+		eld[20 + mnl] = name[mnl];
+	}
+	eld[4] = (cea[1] << 5) | mnl;
+	DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+	eld[0] = 2 << 3;		/* ELD version: 2 */
+
+	eld[16] = edid->mfg_id[0];
+	eld[17] = edid->mfg_id[1];
+	eld[18] = edid->prod_code[0];
+	eld[19] = edid->prod_code[1];
+
+	for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+		dbl = db[0] & 0x1f;
+
+		switch ((db[0] & 0xe0) >> 5) {
+		case AUDIO_BLOCK:	/* Audio Data Block, contains SADs */
+			sad_count = dbl / 3;
+			memcpy(eld + 20 + mnl, &db[1], dbl);
+			break;
+		case SPEAKER_BLOCK:	/* Speaker Allocation Data Block */
+			eld[7] = db[1];
+			break;
+		case VENDOR_BLOCK:
+			/* HDMI Vendor-Specific Data Block */
+			if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+				parse_hdmi_vsdb(connector, db);
+			break;
+		default:
+			break;
+		}
+	}
+	eld[5] |= sad_count << 4;
+	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+EXPORT_SYMBOL(drm_edid_to_eld);
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
+{
+	int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+	int a, v;
+
+	if (!connector->latency_present[0])
+		return 0;
+	if (!connector->latency_present[1])
+		i = 0;
+
+	a = connector->audio_latency[i];
+	v = connector->video_latency[i];
+
+	/*
+	 * HDMI/DP sink doesn't support audio or video?
+	 */
+	if (a == 255 || v == 255)
+		return 0;
+
+	/*
+	 * Convert raw EDID values to millisecond.
+	 * Treat unknown latency as 0ms.
+	 */
+	if (a)
+		a = min(2 * (a - 1), 500);
+	if (v)
+		v = min(2 * (v - 1), 500);
+
+	return max(v - a, 0);
+}
+EXPORT_SYMBOL(drm_av_sync_delay);
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder && connector->eld[0])
+			return connector;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_select_eld);
+
 /**
  * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
  * @edid: monitor EDID information
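Taken together, the new drm_edid.c helpers give a KMS driver everything it needs to feed an HDMI/DP audio codec: drm_edid_to_eld() fills connector->eld when the EDID is (re)read, drm_select_eld() picks the first audio-capable sink on an encoder, and drm_av_sync_delay() turns the parsed latencies into a millisecond correction. A hedged sketch of that call sequence; the example_* function is hypothetical, not part of the patch:

/* Assumes drm_edid_to_eld(connector, edid) already ran at detect time. */
static void example_update_audio_state(struct drm_encoder *encoder,
                                       struct drm_display_mode *adjusted_mode)
{
        struct drm_connector *connector;
        int av_delay_ms;

        connector = drm_select_eld(encoder, adjusted_mode);
        if (!connector)
                return; /* no audio-capable sink on this encoder */

        av_delay_ms = drm_av_sync_delay(connector, adjusted_mode);
        /* program av_delay_ms and connector->eld into the audio hardware */
}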
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 186d62eb063b..396e60ce8114 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -285,6 +285,94 @@ again:
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
 
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list = &obj->map_list;
+
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	drm_mm_put_block(list->file_offset_node);
+	kfree(list->map);
+	list->map = NULL;
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_local_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+			obj->size / PAGE_SIZE, 0, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOSPC;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+			obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
+		DRM_ERROR("failed to add to map hash\n");
+		goto out_free_mm;
+	}
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	kfree(list->map);
+	list->map = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
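A driver would typically call drm_gem_create_mmap_offset() lazily, the first time userspace asks where a buffer can be mapped, and hand back the offset in bytes; the hash key stored above is in page units. A sketch under those assumptions (example_* is hypothetical):

static int example_gem_map_offset(struct drm_gem_object *obj, u64 *offset_p)
{
        int ret = 0;

        /* allocate the fake offset only once per object */
        if (!obj->map_list.map)
                ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                return ret;

        /* hash key is in pages; mmap(2) wants a byte offset */
        *offset_p = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        return 0;
}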
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 9e5b07efebb7..0f3c4e3cafc3 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -95,7 +95,6 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
 	struct drm_device *dev = minor->dev;
 	struct proc_dir_entry *ent;
 	struct drm_info_node *tmp;
-	char name[64];
 	int i, ret;
 
 	for (i = 0; i < count; i++) {
@@ -118,7 +117,7 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
 					 &drm_proc_fops, tmp);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
-				  name, files[i].name);
+				  root->name, files[i].name);
 			list_del(&tmp->list);
 			kfree(tmp);
 			ret = -1;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
new file mode 100644
index 000000000000..847466aab435
--- /dev/null
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -0,0 +1,20 @@
+config DRM_EXYNOS
+	tristate "DRM Support for Samsung SoC EXYNOS Series"
+	depends on DRM && PLAT_SAMSUNG
+	default n
+	select DRM_KMS_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	help
+	  Choose this option if you have a Samsung SoC EXYNOS chipset.
+	  If M is selected the module will be called exynosdrm.
+
+config DRM_EXYNOS_FIMD
+	tristate "Exynos DRM FIMD"
+	depends on DRM_EXYNOS
+	default n
+	help
+	  Choose this option if you want to use Exynos FIMD for DRM.
+	  If M is selected, the module will be called exynos_drm_fimd
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
new file mode 100644
index 000000000000..0496d3ff2683
--- /dev/null
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
+exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
+		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
+		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+
+obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
+obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
new file mode 100644
index 000000000000..6f8afea94fc9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -0,0 +1,110 @@
+/* exynos_drm_buf.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_buf.h"
+
+static DEFINE_MUTEX(exynos_drm_buf_lock);
+
+static int lowlevel_buffer_allocate(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
+			(dma_addr_t *)&entry->paddr, GFP_KERNEL);
+	if (!entry->paddr) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		return -ENOMEM;
+	}
+
+	DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
+			(unsigned int)entry->vaddr, entry->paddr, entry->size);
+
+	return 0;
+}
+
+static void lowlevel_buffer_deallocate(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry)
+{
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	if (entry->paddr && entry->vaddr && entry->size)
+		dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
+				entry->paddr);
+	else
+		DRM_DEBUG_KMS("entry data is null.\n");
+}
+
+struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+		unsigned int size)
+{
+	struct exynos_drm_buf_entry *entry;
+
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	entry->size = size;
+
+	/*
+	 * allocate memory region with size and set the memory information
+	 * to vaddr and paddr of a entry object.
+	 */
+	if (lowlevel_buffer_allocate(dev, entry) < 0) {
+		kfree(entry);
+		entry = NULL;
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return entry;
+}
+
+void exynos_drm_buf_destroy(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry)
+{
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	if (!entry) {
+		DRM_DEBUG_KMS("entry is null.\n");
+		return;
+	}
+
+	lowlevel_buffer_deallocate(dev, entry);
+
+	kfree(entry);
+	entry = NULL;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
+MODULE_LICENSE("GPL");
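exynos_drm_buf_create() returns an entry whose paddr can be programmed straight into the display controller and whose vaddr is usable from the kernel; the caller owns the entry until exynos_drm_buf_destroy(). A minimal, hypothetical usage sketch:

static struct exynos_drm_buf_entry *
example_alloc_scanout(struct drm_device *dev, unsigned int size)
{
        struct exynos_drm_buf_entry *entry;

        entry = exynos_drm_buf_create(dev, size);
        if (IS_ERR(entry))
                return NULL; /* contiguous allocation failed */

        /* entry->paddr -> scanout hardware, entry->vaddr -> CPU access;
         * release later with exynos_drm_buf_destroy(dev, entry). */
        return entry;
}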
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
new file mode 100644
index 000000000000..045d59eab01a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -0,0 +1,53 @@
+/* exynos_drm_buf.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_BUF_H_
+#define _EXYNOS_DRM_BUF_H_
+
+/*
+ * exynos drm buffer entry structure.
+ *
+ * @paddr: physical address of allocated memory.
+ * @vaddr: kernel virtual address of allocated memory.
+ * @size: size of allocated memory.
+ */
+struct exynos_drm_buf_entry {
+	dma_addr_t paddr;
+	void __iomem *vaddr;
+	unsigned int size;
+};
+
+/* allocate physical memory. */
+struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+		unsigned int size);
+
+/* get physical memory information of a drm framebuffer. */
+struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+
+/* remove allocated physical memory. */
+void exynos_drm_buf_destroy(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
new file mode 100644
index 000000000000..985d9e768728
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+#define MAX_EDID 256
+#define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\
+				drm_connector)
+
+struct exynos_drm_connector {
+	struct drm_connector	drm_connector;
+};
+
+/* convert exynos_video_timings to drm_display_mode */
+static inline void
+convert_to_display_mode(struct drm_display_mode *mode,
+			struct fb_videomode *timing)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mode->clock = timing->pixclock / 1000;
+
+	mode->hdisplay = timing->xres;
+	mode->hsync_start = mode->hdisplay + timing->left_margin;
+	mode->hsync_end = mode->hsync_start + timing->hsync_len;
+	mode->htotal = mode->hsync_end + timing->right_margin;
+
+	mode->vdisplay = timing->yres;
+	mode->vsync_start = mode->vdisplay + timing->upper_margin;
+	mode->vsync_end = mode->vsync_start + timing->vsync_len;
+	mode->vtotal = mode->vsync_end + timing->lower_margin;
+}
+
+/* convert drm_display_mode to exynos_video_timings */
+static inline void
+convert_to_video_timing(struct fb_videomode *timing,
+			struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	memset(timing, 0, sizeof(*timing));
+
+	timing->pixclock = mode->clock * 1000;
+	timing->refresh = mode->vrefresh;
+
+	timing->xres = mode->hdisplay;
+	timing->left_margin = mode->hsync_start - mode->hdisplay;
+	timing->hsync_len = mode->hsync_end - mode->hsync_start;
+	timing->right_margin = mode->htotal - mode->hsync_end;
+
+	timing->yres = mode->vdisplay;
+	timing->upper_margin = mode->vsync_start - mode->vdisplay;
+	timing->vsync_len = mode->vsync_end - mode->vsync_start;
+	timing->lower_margin = mode->vtotal - mode->vsync_end;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		timing->vmode = FB_VMODE_INTERLACED;
+	else
+		timing->vmode = FB_VMODE_NONINTERLACED;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		timing->vmode |= FB_VMODE_DOUBLE;
+}
+
+static int exynos_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct exynos_drm_manager *manager =
+				exynos_drm_get_manager(connector->encoder);
+	struct exynos_drm_display *display = manager->display;
+	unsigned int count;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!display) {
+		DRM_DEBUG_KMS("display is null.\n");
+		return 0;
+	}
+
+	/*
+	 * if get_edid() exists then get_edid() callback of hdmi side
+	 * is called to get edid data through i2c interface else
+	 * get timing from the FIMD driver(display controller).
+	 *
+	 * P.S. in case of lcd panel, count is always 1 if success
+	 * because lcd panel has only one mode.
+	 */
+	if (display->get_edid) {
+		int ret;
+		void *edid;
+
+		edid = kzalloc(MAX_EDID, GFP_KERNEL);
+		if (!edid) {
+			DRM_ERROR("failed to allocate edid\n");
+			return 0;
+		}
+
+		ret = display->get_edid(manager->dev, connector,
+				edid, MAX_EDID);
+		if (ret < 0) {
+			DRM_ERROR("failed to get edid data.\n");
+			kfree(edid);
+			edid = NULL;
+			return 0;
+		}
+
+		drm_mode_connector_update_edid_property(connector, edid);
+		count = drm_add_edid_modes(connector, edid);
+
+		kfree(connector->display_info.raw_edid);
+		connector->display_info.raw_edid = edid;
+	} else {
+		struct drm_display_mode *mode = drm_mode_create(connector->dev);
+		struct fb_videomode *timing;
+
+		if (display->get_timing)
+			timing = display->get_timing(manager->dev);
+		else {
+			drm_mode_destroy(connector->dev, mode);
+			return 0;
+		}
+
+		convert_to_display_mode(mode, timing);
+
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+
+		count = 1;
+	}
+
+	return count;
+}
+
+static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	struct exynos_drm_manager *manager =
+				exynos_drm_get_manager(connector->encoder);
+	struct exynos_drm_display *display = manager->display;
+	struct fb_videomode timing;
+	int ret = MODE_BAD;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	convert_to_video_timing(&timing, mode);
+
+	if (display && display->check_timing)
+		if (!display->check_timing(manager->dev, (void *)&timing))
+			ret = MODE_OK;
+
+	return ret;
+}
+
+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return connector->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
+	.get_modes	= exynos_drm_connector_get_modes,
+	.mode_valid	= exynos_drm_connector_mode_valid,
+	.best_encoder	= exynos_drm_best_encoder,
+};
+
+/* get detection status of display device. */
+static enum drm_connector_status
+exynos_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct exynos_drm_manager *manager =
+				exynos_drm_get_manager(connector->encoder);
+	struct exynos_drm_display *display = manager->display;
+	enum drm_connector_status status = connector_status_disconnected;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (display && display->is_connected) {
+		if (display->is_connected(manager->dev))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+	}
+
+	return status;
+}
+
+static void exynos_drm_connector_destroy(struct drm_connector *connector)
+{
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(exynos_connector);
+}
+
+static struct drm_connector_funcs exynos_connector_funcs = {
+	.dpms		= drm_helper_connector_dpms,
+	.fill_modes	= drm_helper_probe_single_connector_modes,
+	.detect		= exynos_drm_connector_detect,
+	.destroy	= exynos_drm_connector_destroy,
+};
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+						   struct drm_encoder *encoder)
+{
+	struct exynos_drm_connector *exynos_connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct drm_connector *connector;
+	int type;
+	int err;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
+	if (!exynos_connector) {
+		DRM_ERROR("failed to allocate connector\n");
+		return NULL;
+	}
+
+	connector = &exynos_connector->drm_connector;
+
+	switch (manager->display->type) {
+	case EXYNOS_DISPLAY_TYPE_HDMI:
+		type = DRM_MODE_CONNECTOR_HDMIA;
+		break;
+	default:
+		type = DRM_MODE_CONNECTOR_Unknown;
+		break;
+	}
+
+	drm_connector_init(dev, connector, &exynos_connector_funcs, type);
+	drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
+
+	err = drm_sysfs_connector_add(connector);
+	if (err)
+		goto err_connector;
+
+	connector->encoder = encoder;
+	err = drm_mode_connector_attach_encoder(connector, encoder);
+	if (err) {
+		DRM_ERROR("failed to attach a connector to a encoder\n");
+		goto err_sysfs;
+	}
+
+	DRM_DEBUG_KMS("connector has been created\n");
+
+	return connector;
+
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+err_connector:
+	drm_connector_cleanup(connector);
+	kfree(exynos_connector);
+	return NULL;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Connector Driver");
+MODULE_LICENSE("GPL");
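Everything above funnels into the display callbacks supplied by the sub driver: is_connected() backs detect, get_edid() or get_timing() backs get_modes, and check_timing() backs mode_valid. A sketch of what such a provider might look like, assuming the field names used above; the example_* callbacks and the panel numbers are hypothetical:

static bool example_is_connected(struct device *dev)
{
        return true; /* e.g. sample a hotplug GPIO here */
}

static void *example_get_timing(struct device *dev)
{
        /* fixed mode of an LCD panel; get_modes() reports exactly one mode */
        static struct fb_videomode timing = {
                .xres = 1024,
                .yres = 600,
                .refresh = 60,
                /* pixclock, margins and sync lengths per the panel spec */
        };

        return &timing;
}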
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
new file mode 100644
index 000000000000..1c7b2b5b579c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_CONNECTOR_H_
+#define _EXYNOS_DRM_CONNECTOR_H_
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+						   struct drm_encoder *encoder);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
new file mode 100644
index 000000000000..661a03571d0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -0,0 +1,272 @@
+/* exynos_drm_core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
+#include "exynos_drm_fbdev.h"
+
+static DEFINE_MUTEX(exynos_drm_mutex);
+static LIST_HEAD(exynos_drm_subdrv_list);
+static struct drm_device *drm_dev;
+
+static int exynos_drm_subdrv_probe(struct drm_device *dev,
+					struct exynos_drm_subdrv *subdrv)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (subdrv->probe) {
+		int ret;
+
+		/*
+		 * this probe callback would be called by sub driver
+		 * after setting of all resources to this sub driver,
+		 * such as clock, irq and register map are done or by load()
+		 * of exynos drm driver.
+		 *
+		 * P.S. note that this driver is considered for modularization.
+		 */
+		ret = subdrv->probe(dev, subdrv->manager.dev);
+		if (ret)
+			return ret;
+	}
+
+	/* create and initialize a encoder for this sub driver. */
+	encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
+			(1 << MAX_CRTC) - 1);
+	if (!encoder) {
+		DRM_ERROR("failed to create encoder\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * create and initialize a connector for this sub driver and
+	 * attach the encoder created above to the connector.
+	 */
+	connector = exynos_drm_connector_create(dev, encoder);
+	if (!connector) {
+		DRM_ERROR("failed to create connector\n");
+		encoder->funcs->destroy(encoder);
+		return -EFAULT;
+	}
+
+	subdrv->encoder = encoder;
+	subdrv->connector = connector;
+
+	return 0;
+}
+
+static void exynos_drm_subdrv_remove(struct drm_device *dev,
+					struct exynos_drm_subdrv *subdrv)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (subdrv->remove)
+		subdrv->remove(dev);
+
+	if (subdrv->encoder) {
+		struct drm_encoder *encoder = subdrv->encoder;
+		encoder->funcs->destroy(encoder);
+		subdrv->encoder = NULL;
+	}
+
+	if (subdrv->connector) {
+		struct drm_connector *connector = subdrv->connector;
+		connector->funcs->destroy(connector);
+		subdrv->connector = NULL;
+	}
+}
+
+int exynos_drm_device_register(struct drm_device *dev)
+{
+	struct exynos_drm_subdrv *subdrv, *n;
+	int err;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!dev)
+		return -EINVAL;
+
+	if (drm_dev) {
+		DRM_ERROR("Already drm device were registered\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&exynos_drm_mutex);
+	list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
+		err = exynos_drm_subdrv_probe(dev, subdrv);
+		if (err) {
+			DRM_DEBUG("exynos drm subdrv probe failed.\n");
+			list_del(&subdrv->list);
+		}
+	}
+
+	drm_dev = dev;
+	mutex_unlock(&exynos_drm_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+
+int exynos_drm_device_unregister(struct drm_device *dev)
+{
+	struct exynos_drm_subdrv *subdrv;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!dev || dev != drm_dev) {
+		WARN(1, "Unexpected drm device unregister!\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&exynos_drm_mutex);
+	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
+		exynos_drm_subdrv_remove(dev, subdrv);
+
+	drm_dev = NULL;
+	mutex_unlock(&exynos_drm_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
+
+static int exynos_drm_mode_group_reinit(struct drm_device *dev)
+{
+	struct drm_mode_group *group = &dev->primary->mode_group;
+	uint32_t *id_list = group->id_list;
+	int ret;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	ret = drm_mode_group_init_legacy_group(dev, group);
+	if (ret < 0)
+		return ret;
+
+	kfree(id_list);
+	return 0;
+}
+
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
+{
+	int err;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!subdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_mutex);
+	if (drm_dev) {
+		err = exynos_drm_subdrv_probe(drm_dev, subdrv);
+		if (err) {
+			DRM_ERROR("failed to probe exynos drm subdrv\n");
+			mutex_unlock(&exynos_drm_mutex);
+			return err;
+		}
+
+		/*
+		 * if any specific driver such as fimd or hdmi driver called
+		 * exynos_drm_subdrv_register() later than drm_load(),
+		 * the fb helper should be re-initialized and re-configured.
+		 */
+		err = exynos_drm_fbdev_reinit(drm_dev);
+		if (err) {
+			DRM_ERROR("failed to reinitialize exynos drm fbdev\n");
+			exynos_drm_subdrv_remove(drm_dev, subdrv);
+			mutex_unlock(&exynos_drm_mutex);
+			return err;
+		}
+
+		err = exynos_drm_mode_group_reinit(drm_dev);
+		if (err) {
+			DRM_ERROR("failed to reinitialize mode group\n");
+			exynos_drm_fbdev_fini(drm_dev);
+			exynos_drm_subdrv_remove(drm_dev, subdrv);
+			mutex_unlock(&exynos_drm_mutex);
+			return err;
+		}
+	}
+
+	subdrv->drm_dev = drm_dev;
+
+	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
+	mutex_unlock(&exynos_drm_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
+
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
+{
+	int ret = -EFAULT;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!subdrv) {
+		DRM_DEBUG("Unexpected exynos drm subdrv unregister!\n");
+		return ret;
+	}
+
+	mutex_lock(&exynos_drm_mutex);
+	if (drm_dev) {
+		exynos_drm_subdrv_remove(drm_dev, subdrv);
+		list_del(&subdrv->list);
+
+		/*
+		 * fb helper should be updated once a sub driver is released
+		 * to re-configure crtc and connector and also to re-setup
+		 * drm framebuffer.
+		 */
+		ret = exynos_drm_fbdev_reinit(drm_dev);
+		if (ret < 0) {
+			DRM_ERROR("failed fb helper reinit.\n");
+			goto fail;
+		}
+
+		ret = exynos_drm_mode_group_reinit(drm_dev);
+		if (ret < 0) {
+			DRM_ERROR("failed drm mode group reinit.\n");
+			goto fail;
+		}
+	}
+
+fail:
+	mutex_unlock(&exynos_drm_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Core Driver");
+MODULE_LICENSE("GPL");
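A sub driver (the FIMD or HDMI driver later in this series) plugs into the core by registering an exynos_drm_subdrv; its probe() runs immediately if the drm device is already loaded, otherwise from exynos_drm_device_register(). A hedged registration sketch, with example_* names hypothetical:

static int example_probe(struct drm_device *drm_dev, struct device *dev)
{
        /* clocks, irq and register mapping are already set up here */
        return 0;
}

static struct exynos_drm_subdrv example_subdrv = {
        .probe = example_probe,
        /* .remove and the embedded .manager (ops, display) go here too */
};

static int __init example_init(void)
{
        return exynos_drm_subdrv_register(&example_subdrv);
}
module_init(example_init);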
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c new file mode 100644 index 000000000000..9337e5e2dbb6 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -0,0 +1,381 @@ | |||
1 | /* exynos_drm_crtc.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm_crtc_helper.h" | ||
31 | |||
32 | #include "exynos_drm_drv.h" | ||
33 | #include "exynos_drm_fb.h" | ||
34 | #include "exynos_drm_encoder.h" | ||
35 | #include "exynos_drm_buf.h" | ||
36 | |||
37 | #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ | ||
38 | drm_crtc) | ||
39 | |||
40 | /* | ||
41 | * Exynos specific crtc postion structure. | ||
42 | * | ||
43 | * @fb_x: offset x on a framebuffer to be displyed | ||
44 | * - the unit is screen coordinates. | ||
45 | * @fb_y: offset y on a framebuffer to be displayed | ||
46 | * - the unit is screen coordinates. | ||
47 | * @crtc_x: offset x on hardware screen. | ||
48 | * @crtc_y: offset y on hardware screen. | ||
49 | * @crtc_w: width of hardware screen. | ||
50 | * @crtc_h: height of hardware screen. | ||
51 | */ | ||
52 | struct exynos_drm_crtc_pos { | ||
53 | unsigned int fb_x; | ||
54 | unsigned int fb_y; | ||
55 | unsigned int crtc_x; | ||
56 | unsigned int crtc_y; | ||
57 | unsigned int crtc_w; | ||
58 | unsigned int crtc_h; | ||
59 | }; | ||
60 | |||
61 | /* | ||
62 | * Exynos specific crtc structure. | ||
63 | * | ||
64 | * @drm_crtc: crtc object. | ||
65 | * @overlay: contain information common to display controller and hdmi and | ||
66 | * contents of this overlay object would be copied to sub driver size. | ||
67 | * @pipe: a crtc index created at load() with a new crtc object creation | ||
68 | * and the crtc object would be set to private->crtc array | ||
69 | * to get a crtc object corresponding to this pipe from private->crtc | ||
70 | * array when irq interrupt occured. the reason of using this pipe is that | ||
71 | * drm framework doesn't support multiple irq yet. | ||
72 | * we can refer to the crtc to current hardware interrupt occured through | ||
73 | * this pipe value. | ||
74 | */ | ||
75 | struct exynos_drm_crtc { | ||
76 | struct drm_crtc drm_crtc; | ||
77 | struct exynos_drm_overlay overlay; | ||
78 | unsigned int pipe; | ||
79 | }; | ||
80 | |||
81 | static void exynos_drm_crtc_apply(struct drm_crtc *crtc) | ||
82 | { | ||
83 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
84 | struct exynos_drm_overlay *overlay = &exynos_crtc->overlay; | ||
85 | |||
86 | exynos_drm_fn_encoder(crtc, overlay, | ||
87 | exynos_drm_encoder_crtc_mode_set); | ||
88 | exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); | ||
89 | } | ||
90 | |||
91 | static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, | ||
92 | struct drm_framebuffer *fb, | ||
93 | struct drm_display_mode *mode, | ||
94 | struct exynos_drm_crtc_pos *pos) | ||
95 | { | ||
96 | struct exynos_drm_buf_entry *entry; | ||
97 | unsigned int actual_w; | ||
98 | unsigned int actual_h; | ||
99 | |||
100 | entry = exynos_drm_fb_get_buf(fb); | ||
101 | if (!entry) { | ||
102 | DRM_LOG_KMS("entry is null.\n"); | ||
103 | return -EFAULT; | ||
104 | } | ||
105 | |||
106 | overlay->paddr = entry->paddr; | ||
107 | overlay->vaddr = entry->vaddr; | ||
108 | |||
109 | DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", | ||
110 | (unsigned long)overlay->vaddr, | ||
111 | (unsigned long)overlay->paddr); | ||
112 | |||
113 | actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); | ||
114 | actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); | ||
115 | |||
116 | /* set drm framebuffer data. */ | ||
117 | overlay->fb_x = pos->fb_x; | ||
118 | overlay->fb_y = pos->fb_y; | ||
119 | overlay->fb_width = fb->width; | ||
120 | overlay->fb_height = fb->height; | ||
121 | overlay->bpp = fb->bits_per_pixel; | ||
122 | overlay->pitch = fb->pitch; | ||
123 | |||
124 | /* set overlay range to be displayed. */ | ||
125 | overlay->crtc_x = pos->crtc_x; | ||
126 | overlay->crtc_y = pos->crtc_y; | ||
127 | overlay->crtc_width = actual_w; | ||
128 | overlay->crtc_height = actual_h; | ||
129 | |||
130 | /* set drm mode data. */ | ||
131 | overlay->mode_width = mode->hdisplay; | ||
132 | overlay->mode_height = mode->vdisplay; | ||
133 | overlay->refresh = mode->vrefresh; | ||
134 | overlay->scan_flag = mode->flags; | ||
135 | |||
136 | DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)", | ||
137 | overlay->crtc_x, overlay->crtc_y, | ||
138 | overlay->crtc_width, overlay->crtc_height); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int exynos_drm_crtc_update(struct drm_crtc *crtc) | ||
144 | { | ||
145 | struct exynos_drm_crtc *exynos_crtc; | ||
146 | struct exynos_drm_overlay *overlay; | ||
147 | struct exynos_drm_crtc_pos pos; | ||
148 | struct drm_display_mode *mode = &crtc->mode; | ||
149 | struct drm_framebuffer *fb = crtc->fb; | ||
150 | |||
151 | if (!mode || !fb) | ||
152 | return -EINVAL; | ||
153 | |||
154 | exynos_crtc = to_exynos_crtc(crtc); | ||
155 | overlay = &exynos_crtc->overlay; | ||
156 | |||
157 | memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos)); | ||
158 | |||
159 | /* it means the offset of framebuffer to be displayed. */ | ||
160 | pos.fb_x = crtc->x; | ||
161 | pos.fb_y = crtc->y; | ||
162 | |||
163 | /* OSD position to be displayed. */ | ||
164 | pos.crtc_x = 0; | ||
165 | pos.crtc_y = 0; | ||
166 | pos.crtc_w = fb->width - crtc->x; | ||
167 | pos.crtc_h = fb->height - crtc->y; | ||
168 | |||
169 | return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos); | ||
170 | } | ||
171 | |||
172 | static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
173 | { | ||
174 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
175 | |||
176 | /* TODO */ | ||
177 | } | ||
178 | |||
179 | static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) | ||
180 | { | ||
181 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
182 | |||
183 | /* drm framework doesn't check NULL. */ | ||
184 | } | ||
185 | |||
186 | static void exynos_drm_crtc_commit(struct drm_crtc *crtc) | ||
187 | { | ||
188 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
189 | |||
190 | /* drm framework doesn't check NULL. */ | ||
191 | } | ||
192 | |||
193 | static bool | ||
194 | exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, | ||
195 | struct drm_display_mode *mode, | ||
196 | struct drm_display_mode *adjusted_mode) | ||
197 | { | ||
198 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
199 | |||
200 | /* drm framework doesn't check NULL */ | ||
201 | return true; | ||
202 | } | ||
203 | |||
204 | static int | ||
205 | exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||
206 | struct drm_display_mode *adjusted_mode, int x, int y, | ||
207 | struct drm_framebuffer *old_fb) | ||
208 | { | ||
209 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
210 | |||
211 | mode = adjusted_mode; | ||
212 | |||
213 | return exynos_drm_crtc_update(crtc); | ||
214 | } | ||
215 | |||
216 | static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | ||
217 | struct drm_framebuffer *old_fb) | ||
218 | { | ||
219 | int ret; | ||
220 | |||
221 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
222 | |||
223 | ret = exynos_drm_crtc_update(crtc); | ||
224 | if (ret) | ||
225 | return ret; | ||
226 | |||
227 | exynos_drm_crtc_apply(crtc); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc) | ||
233 | { | ||
234 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
235 | /* drm framework doesn't check NULL */ | ||
236 | } | ||
237 | |||
238 | static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { | ||
239 | .dpms = exynos_drm_crtc_dpms, | ||
240 | .prepare = exynos_drm_crtc_prepare, | ||
241 | .commit = exynos_drm_crtc_commit, | ||
242 | .mode_fixup = exynos_drm_crtc_mode_fixup, | ||
243 | .mode_set = exynos_drm_crtc_mode_set, | ||
244 | .mode_set_base = exynos_drm_crtc_mode_set_base, | ||
245 | .load_lut = exynos_drm_crtc_load_lut, | ||
246 | }; | ||
247 | |||
248 | static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, | ||
249 | struct drm_framebuffer *fb, | ||
250 | struct drm_pending_vblank_event *event) | ||
251 | { | ||
252 | struct drm_device *dev = crtc->dev; | ||
253 | struct exynos_drm_private *dev_priv = dev->dev_private; | ||
254 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
255 | struct drm_framebuffer *old_fb = crtc->fb; | ||
256 | int ret = -EINVAL; | ||
257 | |||
258 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
259 | |||
260 | mutex_lock(&dev->struct_mutex); | ||
261 | |||
262 | if (event) { | ||
263 | /* | ||
264 | * the pipe from userspace is always 0, so we can set the pipe | ||
265 | * number of the current owner on the event. | ||
266 | */ | ||
267 | event->pipe = exynos_crtc->pipe; | ||
268 | |||
269 | list_add_tail(&event->base.link, | ||
270 | &dev_priv->pageflip_event_list); | ||
271 | |||
272 | ret = drm_vblank_get(dev, exynos_crtc->pipe); | ||
273 | if (ret) { | ||
274 | DRM_DEBUG("failed to acquire vblank counter\n"); | ||
275 | list_del(&event->base.link); | ||
276 | |||
277 | goto out; | ||
278 | } | ||
279 | |||
280 | crtc->fb = fb; | ||
281 | ret = exynos_drm_crtc_update(crtc); | ||
282 | if (ret) { | ||
283 | crtc->fb = old_fb; | ||
284 | drm_vblank_put(dev, exynos_crtc->pipe); | ||
285 | list_del(&event->base.link); | ||
286 | |||
287 | goto out; | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * the buffer-related values of the drm framebuffer to be | ||
292 | * applied must be set here, because they are first written | ||
293 | * to shadow registers and only latched into the real | ||
294 | * registers during the vsync front porch period. | ||
295 | */ | ||
296 | exynos_drm_crtc_apply(crtc); | ||
297 | } | ||
298 | out: | ||
299 | mutex_unlock(&dev->struct_mutex); | ||
300 | return ret; | ||
301 | } | ||
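For context, this is the kernel side of the page flip ioctl. A minimal userspace sketch of driving it through libdrm (crtc_id and fb_id are assumed to have been created beforehand; the handler fires once the shadow registers have been latched at vsync):

/* Hedged userspace sketch of the flip path above, using libdrm. */
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int seq, unsigned int sec,
		      unsigned int usec, void *data)
{
	/* the flip completed; it is now safe to reuse the old buffer */
}

int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext ev = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = flip_done,
	};
	int ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;

	return drmHandleEvent(fd, &ev);	/* waits for the vblank event */
}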
302 | |||
303 | static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) | ||
304 | { | ||
305 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
306 | struct exynos_drm_private *private = crtc->dev->dev_private; | ||
307 | |||
308 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
309 | |||
310 | private->crtc[exynos_crtc->pipe] = NULL; | ||
311 | |||
312 | drm_crtc_cleanup(crtc); | ||
313 | kfree(exynos_crtc); | ||
314 | } | ||
315 | |||
316 | static struct drm_crtc_funcs exynos_crtc_funcs = { | ||
317 | .set_config = drm_crtc_helper_set_config, | ||
318 | .page_flip = exynos_drm_crtc_page_flip, | ||
319 | .destroy = exynos_drm_crtc_destroy, | ||
320 | }; | ||
321 | |||
322 | struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev, | ||
323 | struct drm_crtc *crtc) | ||
324 | { | ||
325 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
326 | |||
327 | return &exynos_crtc->overlay; | ||
328 | } | ||
329 | |||
330 | int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) | ||
331 | { | ||
332 | struct exynos_drm_crtc *exynos_crtc; | ||
333 | struct exynos_drm_private *private = dev->dev_private; | ||
334 | struct drm_crtc *crtc; | ||
335 | |||
336 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
337 | |||
338 | exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); | ||
339 | if (!exynos_crtc) { | ||
340 | DRM_ERROR("failed to allocate exynos crtc\n"); | ||
341 | return -ENOMEM; | ||
342 | } | ||
343 | |||
344 | exynos_crtc->pipe = nr; | ||
345 | crtc = &exynos_crtc->drm_crtc; | ||
346 | |||
347 | private->crtc[nr] = crtc; | ||
348 | |||
349 | drm_crtc_init(dev, crtc, &exynos_crtc_funcs); | ||
350 | drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) | ||
356 | { | ||
357 | struct exynos_drm_private *private = dev->dev_private; | ||
358 | |||
359 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
360 | |||
361 | exynos_drm_fn_encoder(private->crtc[crtc], &crtc, | ||
362 | exynos_drm_enable_vblank); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) | ||
368 | { | ||
369 | struct exynos_drm_private *private = dev->dev_private; | ||
370 | |||
371 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
372 | |||
373 | exynos_drm_fn_encoder(private->crtc[crtc], &crtc, | ||
374 | exynos_drm_disable_vblank); | ||
375 | } | ||
376 | |||
377 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
378 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
379 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
380 | MODULE_DESCRIPTION("Samsung SoC DRM CRTC Driver"); | ||
381 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h new file mode 100644 index 000000000000..c584042d6d2c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* exynos_drm_crtc.h | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #ifndef _EXYNOS_DRM_CRTC_H_ | ||
30 | #define _EXYNOS_DRM_CRTC_H_ | ||
31 | |||
32 | struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev, | ||
33 | struct drm_crtc *crtc); | ||
34 | int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); | ||
35 | int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); | ||
36 | void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); | ||
37 | |||
38 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c new file mode 100644 index 000000000000..83810cbe3c17 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -0,0 +1,244 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | |||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | |||
31 | #include <drm/exynos_drm.h> | ||
32 | |||
33 | #include "exynos_drm_drv.h" | ||
34 | #include "exynos_drm_crtc.h" | ||
35 | #include "exynos_drm_fbdev.h" | ||
36 | #include "exynos_drm_fb.h" | ||
37 | #include "exynos_drm_gem.h" | ||
38 | |||
39 | #define DRIVER_NAME "exynos-drm" | ||
40 | #define DRIVER_DESC "Samsung SoC DRM" | ||
41 | #define DRIVER_DATE "20110530" | ||
42 | #define DRIVER_MAJOR 1 | ||
43 | #define DRIVER_MINOR 0 | ||
44 | |||
45 | static int exynos_drm_load(struct drm_device *dev, unsigned long flags) | ||
46 | { | ||
47 | struct exynos_drm_private *private; | ||
48 | int ret; | ||
49 | int nr; | ||
50 | |||
51 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
52 | |||
53 | private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); | ||
54 | if (!private) { | ||
55 | DRM_ERROR("failed to allocate private\n"); | ||
56 | return -ENOMEM; | ||
57 | } | ||
58 | |||
59 | INIT_LIST_HEAD(&private->pageflip_event_list); | ||
60 | dev->dev_private = (void *)private; | ||
61 | |||
62 | drm_mode_config_init(dev); | ||
63 | |||
64 | exynos_drm_mode_config_init(dev); | ||
65 | |||
66 | /* | ||
67 | * EXYNOS4 provides two CRTCs, and each crtc can be used | ||
68 | * independently of the hardware. | ||
69 | */ | ||
70 | for (nr = 0; nr < MAX_CRTC; nr++) { | ||
71 | ret = exynos_drm_crtc_create(dev, nr); | ||
72 | if (ret) | ||
73 | goto err_crtc; | ||
74 | } | ||
75 | |||
76 | ret = drm_vblank_init(dev, MAX_CRTC); | ||
77 | if (ret) | ||
78 | goto err_crtc; | ||
79 | |||
80 | /* | ||
81 | * probe the sub drivers, such as the display controller and | ||
82 | * hdmi drivers, that were registered at the platform driver's | ||
83 | * probe(), and create an encoder and connector for each of them. | ||
84 | */ | ||
85 | ret = exynos_drm_device_register(dev); | ||
86 | if (ret) | ||
87 | goto err_vblank; | ||
88 | |||
89 | /* | ||
90 | * create and configure the fb helper and the exynos specific | ||
91 | * fbdev object. | ||
92 | */ | ||
93 | ret = exynos_drm_fbdev_init(dev); | ||
94 | if (ret) { | ||
95 | DRM_ERROR("failed to initialize drm fbdev\n"); | ||
96 | goto err_drm_device; | ||
97 | } | ||
98 | |||
99 | return 0; | ||
100 | |||
101 | err_drm_device: | ||
102 | exynos_drm_device_unregister(dev); | ||
103 | err_vblank: | ||
104 | drm_vblank_cleanup(dev); | ||
105 | err_crtc: | ||
106 | drm_mode_config_cleanup(dev); | ||
107 | kfree(private); | ||
108 | |||
109 | return ret; | ||
110 | } | ||
111 | |||
112 | static int exynos_drm_unload(struct drm_device *dev) | ||
113 | { | ||
114 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
115 | |||
116 | exynos_drm_fbdev_fini(dev); | ||
117 | exynos_drm_device_unregister(dev); | ||
118 | drm_vblank_cleanup(dev); | ||
119 | drm_mode_config_cleanup(dev); | ||
120 | kfree(dev->dev_private); | ||
121 | |||
122 | dev->dev_private = NULL; | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static void exynos_drm_preclose(struct drm_device *dev, | ||
128 | struct drm_file *file_priv) | ||
129 | { | ||
130 | struct exynos_drm_private *dev_priv = dev->dev_private; | ||
131 | |||
132 | /* | ||
133 | * the drm framework frees all events at release time, | ||
134 | * so the private event list must be cleared here. | ||
135 | */ | ||
136 | if (!list_empty(&dev_priv->pageflip_event_list)) | ||
137 | INIT_LIST_HEAD(&dev_priv->pageflip_event_list); | ||
138 | } | ||
139 | |||
140 | static void exynos_drm_lastclose(struct drm_device *dev) | ||
141 | { | ||
142 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
143 | |||
144 | exynos_drm_fbdev_restore_mode(dev); | ||
145 | } | ||
146 | |||
147 | static struct vm_operations_struct exynos_drm_gem_vm_ops = { | ||
148 | .fault = exynos_drm_gem_fault, | ||
149 | .open = drm_gem_vm_open, | ||
150 | .close = drm_gem_vm_close, | ||
151 | }; | ||
152 | |||
153 | static struct drm_ioctl_desc exynos_ioctls[] = { | ||
154 | DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, | ||
155 | DRM_UNLOCKED | DRM_AUTH), | ||
156 | DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET, | ||
157 | exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED | | ||
158 | DRM_AUTH), | ||
159 | DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP, | ||
160 | exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), | ||
161 | }; | ||
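As a usage note, userspace reaches these ioctls through the generic DRM command interface. A hedged sketch of allocating a gem buffer through EXYNOS_GEM_CREATE (field names follow the exynos_drm.h uapi header added in this series):

/* Hedged userspace sketch: allocate a gem buffer via libdrm. */
#include <xf86drm.h>
#include <drm/exynos_drm.h>

int exynos_gem_alloc(int fd, unsigned int size, unsigned int *handle)
{
	struct drm_exynos_gem_create req = { .size = size };
	int ret;

	ret = drmCommandWriteRead(fd, DRM_EXYNOS_GEM_CREATE,
				  &req, sizeof(req));
	if (ret)
		return ret;

	*handle = req.handle;	/* gem handle filled in by the kernel */
	return 0;
}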
162 | |||
163 | static struct drm_driver exynos_drm_driver = { | ||
164 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | | ||
165 | DRIVER_MODESET | DRIVER_GEM, | ||
166 | .load = exynos_drm_load, | ||
167 | .unload = exynos_drm_unload, | ||
168 | .preclose = exynos_drm_preclose, | ||
169 | .lastclose = exynos_drm_lastclose, | ||
170 | .get_vblank_counter = drm_vblank_count, | ||
171 | .enable_vblank = exynos_drm_crtc_enable_vblank, | ||
172 | .disable_vblank = exynos_drm_crtc_disable_vblank, | ||
173 | .gem_init_object = exynos_drm_gem_init_object, | ||
174 | .gem_free_object = exynos_drm_gem_free_object, | ||
175 | .gem_vm_ops = &exynos_drm_gem_vm_ops, | ||
176 | .dumb_create = exynos_drm_gem_dumb_create, | ||
177 | .dumb_map_offset = exynos_drm_gem_dumb_map_offset, | ||
178 | .dumb_destroy = exynos_drm_gem_dumb_destroy, | ||
179 | .ioctls = exynos_ioctls, | ||
180 | .fops = { | ||
181 | .owner = THIS_MODULE, | ||
182 | .open = drm_open, | ||
183 | .mmap = exynos_drm_gem_mmap, | ||
184 | .poll = drm_poll, | ||
185 | .read = drm_read, | ||
186 | .unlocked_ioctl = drm_ioctl, | ||
187 | .release = drm_release, | ||
188 | }, | ||
189 | .name = DRIVER_NAME, | ||
190 | .desc = DRIVER_DESC, | ||
191 | .date = DRIVER_DATE, | ||
192 | .major = DRIVER_MAJOR, | ||
193 | .minor = DRIVER_MINOR, | ||
194 | }; | ||
195 | |||
196 | static int exynos_drm_platform_probe(struct platform_device *pdev) | ||
197 | { | ||
198 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
199 | |||
200 | exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls); | ||
201 | |||
202 | return drm_platform_init(&exynos_drm_driver, pdev); | ||
203 | } | ||
204 | |||
205 | static int exynos_drm_platform_remove(struct platform_device *pdev) | ||
206 | { | ||
207 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
208 | |||
209 | drm_platform_exit(&exynos_drm_driver, pdev); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static struct platform_driver exynos_drm_platform_driver = { | ||
215 | .probe = exynos_drm_platform_probe, | ||
216 | .remove = __devexit_p(exynos_drm_platform_remove), | ||
217 | .driver = { | ||
218 | .owner = THIS_MODULE, | ||
219 | .name = DRIVER_NAME, | ||
220 | }, | ||
221 | }; | ||
222 | |||
223 | static int __init exynos_drm_init(void) | ||
224 | { | ||
225 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
226 | |||
227 | return platform_driver_register(&exynos_drm_platform_driver); | ||
228 | } | ||
229 | |||
230 | static void __exit exynos_drm_exit(void) | ||
231 | { | ||
232 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
233 | |||
234 | platform_driver_unregister(&exynos_drm_platform_driver); | ||
235 | } | ||
236 | |||
237 | module_init(exynos_drm_init); | ||
238 | module_exit(exynos_drm_exit); | ||
239 | |||
240 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
241 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
242 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
243 | MODULE_DESCRIPTION("Samsung SoC DRM Driver"); | ||
244 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h new file mode 100644 index 000000000000..c03683f2ae72 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -0,0 +1,254 @@ | |||
1 | /* exynos_drm_drv.h | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #ifndef _EXYNOS_DRM_DRV_H_ | ||
30 | #define _EXYNOS_DRM_DRV_H_ | ||
31 | |||
32 | #include "drm.h" | ||
33 | |||
34 | #define MAX_CRTC 2 | ||
35 | |||
36 | struct drm_device; | ||
37 | struct exynos_drm_overlay; | ||
38 | struct drm_connector; | ||
39 | |||
40 | /* this enumerates display type. */ | ||
41 | enum exynos_drm_output_type { | ||
42 | EXYNOS_DISPLAY_TYPE_NONE, | ||
43 | /* RGB or CPU Interface. */ | ||
44 | EXYNOS_DISPLAY_TYPE_LCD, | ||
45 | /* HDMI Interface. */ | ||
46 | EXYNOS_DISPLAY_TYPE_HDMI, | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * Exynos drm overlay ops structure. | ||
51 | * | ||
52 | * @mode_set: copy drm overlay info to hw specific overlay info. | ||
53 | * @commit: apply hardware specific overlay data to registers. | ||
54 | * @disable: disable hardware specific overlay. | ||
55 | */ | ||
56 | struct exynos_drm_overlay_ops { | ||
57 | void (*mode_set)(struct device *subdrv_dev, | ||
58 | struct exynos_drm_overlay *overlay); | ||
59 | void (*commit)(struct device *subdrv_dev); | ||
60 | void (*disable)(struct device *subdrv_dev); | ||
61 | }; | ||
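For illustration, a display controller sub driver might fill these callbacks as follows; this is a hypothetical sketch and the fimd_* names are placeholders, not the actual fimd driver:

/* Hypothetical overlay_ops instance for a display controller. */
static void fimd_overlay_mode_set(struct device *subdrv_dev,
				  struct exynos_drm_overlay *overlay)
{
	/* cache overlay->crtc_*, pitch and paddr in per-window state */
}

static void fimd_overlay_commit(struct device *subdrv_dev)
{
	/* write the cached window state to the hardware registers */
}

static void fimd_overlay_disable(struct device *subdrv_dev)
{
	/* turn the hardware window off */
}

static struct exynos_drm_overlay_ops fimd_overlay_ops = {
	.mode_set	= fimd_overlay_mode_set,
	.commit		= fimd_overlay_commit,
	.disable	= fimd_overlay_disable,
};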
62 | |||
63 | /* | ||
64 | * Exynos drm common overlay structure. | ||
65 | * | ||
66 | * @fb_x: offset x on a framebuffer to be displayed. | ||
67 | * - the unit is screen coordinates. | ||
68 | * @fb_y: offset y on a framebuffer to be displayed. | ||
69 | * - the unit is screen coordinates. | ||
70 | * @fb_width: width of a framebuffer. | ||
71 | * @fb_height: height of a framebuffer. | ||
72 | * @crtc_x: offset x on hardware screen. | ||
73 | * @crtc_y: offset y on hardware screen. | ||
74 | * @crtc_width: window width to be displayed (hardware screen). | ||
75 | * @crtc_height: window height to be displayed (hardware screen). | ||
76 | * @mode_width: width of screen mode. | ||
77 | * @mode_height: height of screen mode. | ||
78 | * @refresh: refresh rate. | ||
79 | * @scan_flag: interlaced or progressive scan. | ||
80 | * (it could be DRM_MODE_FLAG_*) | ||
81 | * @bpp: pixel size (in bits). | ||
82 | * @paddr: bus (dma-accessible) physical address of this overlay's | ||
83 | * memory, which is physically contiguous. | ||
84 | * @vaddr: virtual memory address of this overlay. | ||
85 | * @default_win: a window to be enabled. | ||
86 | * @color_key: color key on or off. | ||
87 | * @index_color: if the color key feature is used, this value is | ||
88 | * used as the index color. | ||
89 | * @local_path: in case of lcd type, local path mode on or off. | ||
90 | * @transparency: transparency on or off. | ||
91 | * @activated: activated or not. | ||
92 | * | ||
93 | * this structure is common to exynos SoCs, and its contents are | ||
94 | * copied into the hardware specific overlay info. | ||
95 | */ | ||
96 | struct exynos_drm_overlay { | ||
97 | unsigned int fb_x; | ||
98 | unsigned int fb_y; | ||
99 | unsigned int fb_width; | ||
100 | unsigned int fb_height; | ||
101 | unsigned int crtc_x; | ||
102 | unsigned int crtc_y; | ||
103 | unsigned int crtc_width; | ||
104 | unsigned int crtc_height; | ||
105 | unsigned int mode_width; | ||
106 | unsigned int mode_height; | ||
107 | unsigned int refresh; | ||
108 | unsigned int scan_flag; | ||
109 | unsigned int bpp; | ||
110 | unsigned int pitch; | ||
111 | dma_addr_t paddr; | ||
112 | void __iomem *vaddr; | ||
113 | |||
114 | bool default_win; | ||
115 | bool color_key; | ||
116 | unsigned int index_color; | ||
117 | bool local_path; | ||
118 | bool transparency; | ||
119 | bool activated; | ||
120 | }; | ||
121 | |||
122 | /* | ||
123 | * Exynos DRM Display Structure. | ||
124 | * - this structure is common to analog tv, digital tv and lcd panel. | ||
125 | * | ||
126 | * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI. | ||
127 | * @is_connected: check whether the display is connected or not. | ||
128 | * @get_edid: get edid modes from display driver. | ||
129 | * @get_timing: get timing object from display driver. | ||
130 | * @check_timing: check if timing is valid or not. | ||
131 | * @power_on: turn the display device on or off. | ||
132 | */ | ||
133 | struct exynos_drm_display { | ||
134 | enum exynos_drm_output_type type; | ||
135 | bool (*is_connected)(struct device *dev); | ||
136 | int (*get_edid)(struct device *dev, struct drm_connector *connector, | ||
137 | u8 *edid, int len); | ||
138 | void *(*get_timing)(struct device *dev); | ||
139 | int (*check_timing)(struct device *dev, void *timing); | ||
140 | int (*power_on)(struct device *dev, int mode); | ||
141 | }; | ||
142 | |||
143 | /* | ||
144 | * Exynos drm manager ops | ||
145 | * | ||
146 | * @mode_set: convert a drm_display_mode to the hw specific display | ||
147 | * mode; called by encoder->mode_set(). | ||
148 | * @commit: set current hw specific display mode to hw. | ||
149 | * @enable_vblank: specific driver callback for enabling vblank interrupt. | ||
150 | * @disable_vblank: specific driver callback for disabling vblank interrupt. | ||
151 | */ | ||
152 | struct exynos_drm_manager_ops { | ||
153 | void (*mode_set)(struct device *subdrv_dev, void *mode); | ||
154 | void (*commit)(struct device *subdrv_dev); | ||
155 | int (*enable_vblank)(struct device *subdrv_dev); | ||
156 | void (*disable_vblank)(struct device *subdrv_dev); | ||
157 | }; | ||
158 | |||
159 | /* | ||
160 | * Exynos drm common manager structure. | ||
161 | * | ||
162 | * @dev: pointer to device object for subdrv device driver. | ||
163 | * sub drivers such as display controller or hdmi driver, | ||
164 | * have their own device object. | ||
165 | * @ops: pointer to callbacks for exynos drm specific framebuffer. | ||
166 | * these callbacks should be set by specific drivers such as fimd | ||
167 | * or hdmi driver and are used to control hardware global registers. | ||
168 | * @overlay_ops: pointer to callbacks for exynos drm specific framebuffer. | ||
169 | * these callbacks should be set by specific drivers such as fimd | ||
170 | * or hdmi driver and are used to control hardware overlay registers. | ||
171 | * @display: pointer to callbacks for exynos drm specific framebuffer. | ||
172 | * these callbacks should be set by specific drivers such as fimd | ||
173 | * or hdmi driver and are used to control display devices such as | ||
174 | * analog tv, digital tv and lcd panel and also get timing data for them. | ||
175 | */ | ||
176 | struct exynos_drm_manager { | ||
177 | struct device *dev; | ||
178 | int pipe; | ||
179 | struct exynos_drm_manager_ops *ops; | ||
180 | struct exynos_drm_overlay_ops *overlay_ops; | ||
181 | struct exynos_drm_display *display; | ||
182 | }; | ||
183 | |||
184 | /* | ||
185 | * Exynos drm private structure. | ||
186 | */ | ||
187 | struct exynos_drm_private { | ||
188 | struct drm_fb_helper *fb_helper; | ||
189 | |||
190 | /* list head for new event to be added. */ | ||
191 | struct list_head pageflip_event_list; | ||
192 | |||
193 | /* | ||
194 | * created crtc objects are kept in this array, which is used | ||
195 | * to track which crtc requested the vblank. | ||
196 | */ | ||
197 | struct drm_crtc *crtc[MAX_CRTC]; | ||
198 | }; | ||
199 | |||
200 | /* | ||
201 | * Exynos drm sub driver structure. | ||
202 | * | ||
203 | * @list: sub driver has its own list object to register to exynos drm driver. | ||
204 | * @drm_dev: pointer to drm_device and this pointer would be set | ||
205 | * when sub driver calls exynos_drm_subdrv_register(). | ||
206 | * @probe: this callback would be called by exynos drm driver after | ||
207 | * subdrv is registered to it. | ||
208 | * @remove: this callback is used to release resources created | ||
209 | * by probe callback. | ||
210 | * @manager: the subdrv has its own manager to control the hardware | ||
211 | * appropriately; hardware drawing is driven through this manager. | ||
212 | * @encoder: encoder object owned by this sub driver. | ||
213 | * @connector: connector object owned by this sub driver. | ||
214 | */ | ||
215 | struct exynos_drm_subdrv { | ||
216 | struct list_head list; | ||
217 | struct drm_device *drm_dev; | ||
218 | |||
219 | int (*probe)(struct drm_device *drm_dev, struct device *dev); | ||
220 | void (*remove)(struct drm_device *dev); | ||
221 | |||
222 | struct exynos_drm_manager manager; | ||
223 | struct drm_encoder *encoder; | ||
224 | struct drm_connector *connector; | ||
225 | }; | ||
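A hedged sketch of how an hdmi-like sub driver might fill and register this structure; every hdmi_* name below is a hypothetical placeholder:

/* Hypothetical sub driver registration sketch. */
static struct exynos_drm_manager_ops hdmi_manager_ops;	/* assumed elsewhere */
static struct exynos_drm_display hdmi_display;		/* assumed elsewhere */

static int hdmi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	/* map registers, request the irq, and so on */
	return 0;
}

static struct exynos_drm_subdrv hdmi_subdrv = {
	.probe = hdmi_subdrv_probe,
	.manager = {
		.pipe		= -1,
		.ops		= &hdmi_manager_ops,
		.display	= &hdmi_display,
	},
};

/* exynos_drm_subdrv_register(&hdmi_subdrv) then triggers encoder and
 * connector creation once the drm device has been loaded. */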
226 | |||
227 | /* | ||
228 | * this function calls the probe callback of each sub driver on the | ||
229 | * sub driver list, creates its encoder and connector, and then | ||
230 | * stores the drm_device object in each sub driver. | ||
231 | */ | ||
232 | int exynos_drm_device_register(struct drm_device *dev); | ||
233 | /* | ||
234 | * this function calls the remove callback of each registered sub | ||
235 | * driver and destroys its encoder and connector. | ||
236 | */ | ||
237 | int exynos_drm_device_unregister(struct drm_device *dev); | ||
238 | |||
239 | /* | ||
240 | * this function is called by sub drivers such as the display | ||
241 | * controller or hdmi driver to register their sub driver object | ||
242 | * with the exynos drm driver. when a sub driver is registered, its | ||
243 | * probe callback is called to create its encoder and connector, | ||
244 | * and the fb helper and drm mode group are then re-initialized. | ||
245 | */ | ||
246 | int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv); | ||
247 | |||
248 | /* | ||
249 | * this function removes the subdrv from the exynos drm driver's list; | ||
250 | * the fb helper and drm mode group are then re-initialized. | ||
251 | */ | ||
252 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv); | ||
253 | |||
254 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c new file mode 100644 index 000000000000..7cf6fa86a67e --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* exynos_drm_encoder.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm_crtc_helper.h" | ||
31 | |||
32 | #include "exynos_drm_drv.h" | ||
33 | #include "exynos_drm_crtc.h" | ||
34 | #include "exynos_drm_encoder.h" | ||
35 | |||
36 | #define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ | ||
37 | drm_encoder) | ||
38 | |||
39 | /* | ||
40 | * exynos specific encoder structure. | ||
41 | * | ||
42 | * @drm_encoder: encoder object. | ||
43 | * @manager: each encoder has its own manager to control the hardware | ||
44 | * appropriately; hardware drawing is driven through this manager. | ||
45 | */ | ||
46 | struct exynos_drm_encoder { | ||
47 | struct drm_encoder drm_encoder; | ||
48 | struct exynos_drm_manager *manager; | ||
49 | }; | ||
50 | |||
51 | static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) | ||
52 | { | ||
53 | struct drm_device *dev = encoder->dev; | ||
54 | struct drm_connector *connector; | ||
55 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); | ||
56 | |||
57 | DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); | ||
58 | |||
59 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
60 | if (connector->encoder == encoder) { | ||
61 | struct exynos_drm_display *display = manager->display; | ||
62 | |||
63 | if (display && display->power_on) | ||
64 | display->power_on(manager->dev, mode); | ||
65 | } | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static bool | ||
70 | exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, | ||
71 | struct drm_display_mode *mode, | ||
72 | struct drm_display_mode *adjusted_mode) | ||
73 | { | ||
74 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
75 | |||
76 | /* drm framework doesn't check NULL. */ | ||
77 | |||
78 | return true; | ||
79 | } | ||
80 | |||
81 | static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, | ||
82 | struct drm_display_mode *mode, | ||
83 | struct drm_display_mode *adjusted_mode) | ||
84 | { | ||
85 | struct drm_device *dev = encoder->dev; | ||
86 | struct drm_connector *connector; | ||
87 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); | ||
88 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | ||
89 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | ||
90 | struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev, | ||
91 | encoder->crtc); | ||
92 | |||
93 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
94 | |||
95 | mode = adjusted_mode; | ||
96 | |||
97 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
98 | if (connector->encoder == encoder) { | ||
99 | if (manager_ops && manager_ops->mode_set) | ||
100 | manager_ops->mode_set(manager->dev, mode); | ||
101 | |||
102 | if (overlay_ops && overlay_ops->mode_set) | ||
103 | overlay_ops->mode_set(manager->dev, overlay); | ||
104 | } | ||
105 | } | ||
106 | } | ||
107 | |||
108 | static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) | ||
109 | { | ||
110 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
111 | |||
112 | /* drm framework doesn't check NULL. */ | ||
113 | } | ||
114 | |||
115 | static void exynos_drm_encoder_commit(struct drm_encoder *encoder) | ||
116 | { | ||
117 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); | ||
118 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | ||
119 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | ||
120 | |||
121 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
122 | |||
123 | if (manager_ops && manager_ops->commit) | ||
124 | manager_ops->commit(manager->dev); | ||
125 | |||
126 | if (overlay_ops && overlay_ops->commit) | ||
127 | overlay_ops->commit(manager->dev); | ||
128 | } | ||
129 | |||
130 | static struct drm_crtc * | ||
131 | exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) | ||
132 | { | ||
133 | return encoder->crtc; | ||
134 | } | ||
135 | |||
136 | static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { | ||
137 | .dpms = exynos_drm_encoder_dpms, | ||
138 | .mode_fixup = exynos_drm_encoder_mode_fixup, | ||
139 | .mode_set = exynos_drm_encoder_mode_set, | ||
140 | .prepare = exynos_drm_encoder_prepare, | ||
141 | .commit = exynos_drm_encoder_commit, | ||
142 | .get_crtc = exynos_drm_encoder_get_crtc, | ||
143 | }; | ||
144 | |||
145 | static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) | ||
146 | { | ||
147 | struct exynos_drm_encoder *exynos_encoder = | ||
148 | to_exynos_encoder(encoder); | ||
149 | |||
150 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
151 | |||
152 | exynos_encoder->manager->pipe = -1; | ||
153 | |||
154 | drm_encoder_cleanup(encoder); | ||
155 | encoder->dev->mode_config.num_encoder--; | ||
156 | kfree(exynos_encoder); | ||
157 | } | ||
158 | |||
159 | static struct drm_encoder_funcs exynos_encoder_funcs = { | ||
160 | .destroy = exynos_drm_encoder_destroy, | ||
161 | }; | ||
162 | |||
163 | struct drm_encoder * | ||
164 | exynos_drm_encoder_create(struct drm_device *dev, | ||
165 | struct exynos_drm_manager *manager, | ||
166 | unsigned int possible_crtcs) | ||
167 | { | ||
168 | struct drm_encoder *encoder; | ||
169 | struct exynos_drm_encoder *exynos_encoder; | ||
170 | |||
171 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
172 | |||
173 | if (!manager || !possible_crtcs) | ||
174 | return NULL; | ||
175 | |||
176 | if (!manager->dev) | ||
177 | return NULL; | ||
178 | |||
179 | exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); | ||
180 | if (!exynos_encoder) { | ||
181 | DRM_ERROR("failed to allocate encoder\n"); | ||
182 | return NULL; | ||
183 | } | ||
184 | |||
185 | exynos_encoder->manager = manager; | ||
186 | encoder = &exynos_encoder->drm_encoder; | ||
187 | encoder->possible_crtcs = possible_crtcs; | ||
188 | |||
189 | DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); | ||
190 | |||
191 | drm_encoder_init(dev, encoder, &exynos_encoder_funcs, | ||
192 | DRM_MODE_ENCODER_TMDS); | ||
193 | |||
194 | drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs); | ||
195 | |||
196 | DRM_DEBUG_KMS("encoder has been created\n"); | ||
197 | |||
198 | return encoder; | ||
199 | } | ||
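A hedged usage fragment follows: a sub driver that can drive either pipe passes a full possible_crtcs bitmask. The example_create_encoder wrapper and its error code are illustrative only:

/* Hypothetical caller of exynos_drm_encoder_create(). */
static int example_create_encoder(struct drm_device *dev,
				  struct exynos_drm_manager *manager)
{
	unsigned int possible_crtcs = (1 << MAX_CRTC) - 1; /* 0x3: both pipes */
	struct drm_encoder *encoder;

	encoder = exynos_drm_encoder_create(dev, manager, possible_crtcs);
	if (!encoder)
		return -ENOMEM;

	return 0;
}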
200 | |||
201 | struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder) | ||
202 | { | ||
203 | return to_exynos_encoder(encoder)->manager; | ||
204 | } | ||
205 | |||
206 | void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, | ||
207 | void (*fn)(struct drm_encoder *, void *)) | ||
208 | { | ||
209 | struct drm_device *dev = crtc->dev; | ||
210 | struct drm_encoder *encoder; | ||
211 | |||
212 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
213 | if (encoder->crtc != crtc) | ||
214 | continue; | ||
215 | |||
216 | fn(encoder, data); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data) | ||
221 | { | ||
222 | struct exynos_drm_manager *manager = | ||
223 | to_exynos_encoder(encoder)->manager; | ||
224 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | ||
225 | int crtc = *(int *)data; | ||
226 | |||
227 | if (manager->pipe == -1) | ||
228 | manager->pipe = crtc; | ||
229 | |||
230 | if (manager_ops->enable_vblank) | ||
231 | manager_ops->enable_vblank(manager->dev); | ||
232 | } | ||
233 | |||
234 | void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data) | ||
235 | { | ||
236 | struct exynos_drm_manager *manager = | ||
237 | to_exynos_encoder(encoder)->manager; | ||
238 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | ||
239 | int crtc = *(int *)data; | ||
240 | |||
241 | if (manager->pipe == -1) | ||
242 | manager->pipe = crtc; | ||
243 | |||
244 | if (manager_ops->disable_vblank) | ||
245 | manager_ops->disable_vblank(manager->dev); | ||
246 | } | ||
247 | |||
248 | void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) | ||
249 | { | ||
250 | struct exynos_drm_manager *manager = | ||
251 | to_exynos_encoder(encoder)->manager; | ||
252 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | ||
253 | |||
254 | overlay_ops->commit(manager->dev); | ||
255 | } | ||
256 | |||
257 | void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) | ||
258 | { | ||
259 | struct exynos_drm_manager *manager = | ||
260 | to_exynos_encoder(encoder)->manager; | ||
261 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | ||
262 | struct exynos_drm_overlay *overlay = data; | ||
263 | |||
264 | overlay_ops->mode_set(manager->dev, overlay); | ||
265 | } | ||
266 | |||
267 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
268 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
269 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
270 | MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver"); | ||
271 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h new file mode 100644 index 000000000000..5ecd645d06a9 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | |||
28 | #ifndef _EXYNOS_DRM_ENCODER_H_ | ||
29 | #define _EXYNOS_DRM_ENCODER_H_ | ||
30 | |||
31 | struct exynos_drm_manager; | ||
32 | |||
33 | struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev, | ||
34 | struct exynos_drm_manager *mgr, | ||
35 | unsigned int possible_crtcs); | ||
36 | struct exynos_drm_manager * | ||
37 | exynos_drm_get_manager(struct drm_encoder *encoder); | ||
38 | void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, | ||
39 | void (*fn)(struct drm_encoder *, void *)); | ||
40 | void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data); | ||
41 | void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); | ||
42 | void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); | ||
43 | void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); | ||
44 | |||
45 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c new file mode 100644 index 000000000000..48d29cfd5240 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -0,0 +1,265 @@ | |||
1 | /* exynos_drm_fb.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm_crtc.h" | ||
31 | #include "drm_crtc_helper.h" | ||
32 | |||
33 | #include "exynos_drm_fb.h" | ||
34 | #include "exynos_drm_buf.h" | ||
35 | #include "exynos_drm_gem.h" | ||
36 | |||
37 | #define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) | ||
38 | |||
39 | /* | ||
40 | * exynos specific framebuffer structure. | ||
41 | * | ||
42 | * @fb: drm framebuffer object. | ||
43 | * @exynos_gem_obj: exynos specific gem object containing a gem object. | ||
44 | * @entry: pointer to exynos drm buffer entry object. | ||
45 | * - contains only the information for the physically contiguous | ||
46 | * memory region allocated at default framebuffer creation. | ||
47 | */ | ||
48 | struct exynos_drm_fb { | ||
49 | struct drm_framebuffer fb; | ||
50 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
51 | struct exynos_drm_buf_entry *entry; | ||
52 | }; | ||
53 | |||
54 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) | ||
55 | { | ||
56 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | ||
57 | |||
58 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
59 | |||
60 | drm_framebuffer_cleanup(fb); | ||
61 | |||
62 | /* | ||
63 | * the default framebuffer has no gem object, so its | ||
64 | * buffer must be released here. | ||
65 | */ | ||
66 | if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) | ||
67 | exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); | ||
68 | |||
69 | kfree(exynos_fb); | ||
70 | exynos_fb = NULL; | ||
71 | } | ||
72 | |||
73 | static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, | ||
74 | struct drm_file *file_priv, | ||
75 | unsigned int *handle) | ||
76 | { | ||
77 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | ||
78 | |||
79 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
80 | |||
81 | return drm_gem_handle_create(file_priv, | ||
82 | &exynos_fb->exynos_gem_obj->base, handle); | ||
83 | } | ||
84 | |||
85 | static int exynos_drm_fb_dirty(struct drm_framebuffer *fb, | ||
86 | struct drm_file *file_priv, unsigned flags, | ||
87 | unsigned color, struct drm_clip_rect *clips, | ||
88 | unsigned num_clips) | ||
89 | { | ||
90 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
91 | |||
92 | /* TODO */ | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static struct drm_framebuffer_funcs exynos_drm_fb_funcs = { | ||
98 | .destroy = exynos_drm_fb_destroy, | ||
99 | .create_handle = exynos_drm_fb_create_handle, | ||
100 | .dirty = exynos_drm_fb_dirty, | ||
101 | }; | ||
102 | |||
103 | static struct drm_framebuffer * | ||
104 | exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, | ||
105 | struct drm_mode_fb_cmd *mode_cmd) | ||
106 | { | ||
107 | struct exynos_drm_fb *exynos_fb; | ||
108 | struct drm_framebuffer *fb; | ||
109 | struct exynos_drm_gem_obj *exynos_gem_obj = NULL; | ||
110 | struct drm_gem_object *obj; | ||
111 | unsigned int size; | ||
112 | int ret; | ||
113 | |||
114 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
115 | |||
116 | mode_cmd->pitch = max(mode_cmd->pitch, | ||
117 | mode_cmd->width * (mode_cmd->bpp >> 3)); | ||
118 | |||
119 | DRM_LOG_KMS("drm fb create(%dx%d)\n", | ||
120 | mode_cmd->width, mode_cmd->height); | ||
121 | |||
122 | exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); | ||
123 | if (!exynos_fb) { | ||
124 | DRM_ERROR("failed to allocate exynos drm framebuffer.\n"); | ||
125 | return ERR_PTR(-ENOMEM); | ||
126 | } | ||
127 | |||
128 | fb = &exynos_fb->fb; | ||
129 | ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); | ||
130 | if (ret) { | ||
131 | DRM_ERROR("failed to initialize framebuffer.\n"); | ||
132 | goto err_init; | ||
133 | } | ||
134 | |||
135 | DRM_LOG_KMS("create: fb id: %d\n", fb->base.id); | ||
136 | |||
137 | size = mode_cmd->pitch * mode_cmd->height; | ||
138 | |||
139 | /* | ||
140 | * mode_cmd->handle could be NULL at booting time or | ||
141 | * with user request. if NULL, a new buffer or a gem object | ||
142 | * would be allocated. | ||
143 | */ | ||
144 | if (!mode_cmd->handle) { | ||
145 | if (!file_priv) { | ||
146 | struct exynos_drm_buf_entry *entry; | ||
147 | |||
148 | /* | ||
149 | * if file_priv is NULL, only a buffer is allocated, | ||
150 | * and it is used for the default framebuffer. | ||
152 | */ | ||
153 | entry = exynos_drm_buf_create(dev, size); | ||
154 | if (IS_ERR(entry)) { | ||
155 | ret = PTR_ERR(entry); | ||
156 | goto err_buffer; | ||
157 | } | ||
158 | |||
159 | exynos_fb->entry = entry; | ||
160 | |||
161 | DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", | ||
162 | (unsigned long)entry->paddr, size); | ||
163 | |||
164 | goto out; | ||
165 | } else { | ||
166 | exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, | ||
167 | size, | ||
168 | &mode_cmd->handle); | ||
169 | if (IS_ERR(exynos_gem_obj)) { | ||
170 | ret = PTR_ERR(exynos_gem_obj); | ||
171 | goto err_buffer; | ||
172 | } | ||
173 | } | ||
174 | } else { | ||
175 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); | ||
176 | if (!obj) { | ||
177 | DRM_ERROR("failed to lookup gem object.\n"); | ||
178 | goto err_buffer; | ||
179 | } | ||
180 | |||
181 | exynos_gem_obj = to_exynos_gem_obj(obj); | ||
182 | |||
183 | drm_gem_object_unreference_unlocked(obj); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * if no exynos_gem_obj was obtained from either a handle or | ||
188 | * a new creation, exynos_fb->exynos_gem_obj stays NULL, so the | ||
189 | * default framebuffer has no gem object of its own, only its | ||
190 | * own buffer object. | ||
191 | */ | ||
192 | exynos_fb->entry = exynos_gem_obj->entry; | ||
193 | |||
194 | DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", | ||
195 | (unsigned long)exynos_fb->entry->paddr, size, | ||
196 | (unsigned int)&exynos_gem_obj->base); | ||
197 | |||
198 | out: | ||
199 | exynos_fb->exynos_gem_obj = exynos_gem_obj; | ||
200 | |||
201 | drm_helper_mode_fill_fb_struct(fb, mode_cmd); | ||
202 | |||
203 | return fb; | ||
204 | |||
205 | err_buffer: | ||
206 | drm_framebuffer_cleanup(fb); | ||
207 | |||
208 | err_init: | ||
209 | kfree(exynos_fb); | ||
210 | |||
211 | return ERR_PTR(ret); | ||
212 | } | ||
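As a side note, the pitch clamp at the top of this function guarantees at least width times bytes-per-pixel. A standalone illustration with hypothetical numbers:

/*
 * Pitch clamp from exynos_drm_fb_init(): a 1366-wide, 32bpp
 * request with an undersized pitch is raised to 1366 * 4 = 5464.
 */
#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int width = 1366, bpp = 32, pitch = 0;

	pitch = MAX(pitch, width * (bpp >> 3));
	printf("pitch = %u\n", pitch);	/* prints 5464 */
	return 0;
}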
213 | |||
214 | struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev, | ||
215 | struct drm_file *file_priv, | ||
216 | struct drm_mode_fb_cmd *mode_cmd) | ||
217 | { | ||
218 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
219 | |||
220 | return exynos_drm_fb_init(file_priv, dev, mode_cmd); | ||
221 | } | ||
222 | |||
223 | struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) | ||
224 | { | ||
225 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | ||
226 | struct exynos_drm_buf_entry *entry; | ||
227 | |||
228 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
229 | |||
230 | entry = exynos_fb->entry; | ||
231 | if (!entry) | ||
232 | return NULL; | ||
233 | |||
234 | DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", | ||
235 | (unsigned long)entry->vaddr, | ||
236 | (unsigned long)entry->paddr); | ||
237 | |||
238 | return entry; | ||
239 | } | ||
240 | |||
241 | static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { | ||
242 | .fb_create = exynos_drm_fb_create, | ||
243 | }; | ||
244 | |||
245 | void exynos_drm_mode_config_init(struct drm_device *dev) | ||
246 | { | ||
247 | dev->mode_config.min_width = 0; | ||
248 | dev->mode_config.min_height = 0; | ||
249 | |||
250 | /* | ||
251 | * set max width and height to the default value (4096x4096). | ||
252 | * these values are used to check the framebuffer size limit | ||
253 | * in drm_mode_addfb(). | ||
254 | */ | ||
255 | dev->mode_config.max_width = 4096; | ||
256 | dev->mode_config.max_height = 4096; | ||
257 | |||
258 | dev->mode_config.funcs = &exynos_drm_mode_config_funcs; | ||
259 | } | ||
260 | |||
261 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
262 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
263 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
264 | MODULE_DESCRIPTION("Samsung SoC DRM FB Driver"); | ||
265 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h new file mode 100644 index 000000000000..eb35931d302c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | |||
28 | #ifndef _EXYNOS_DRM_FB_H_ | ||
29 | #define _EXYNOS_DRM_FB_H_ | ||
30 | |||
31 | struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev, | ||
32 | struct drm_file *filp, | ||
33 | struct drm_mode_fb_cmd *mode_cmd); | ||
34 | |||
35 | void exynos_drm_mode_config_init(struct drm_device *dev); | ||
36 | |||
37 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c new file mode 100644 index 000000000000..1f4b3d1a7713 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
@@ -0,0 +1,456 @@ | |||
1 | /* exynos_drm_fbdev.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm_crtc.h" | ||
31 | #include "drm_fb_helper.h" | ||
32 | #include "drm_crtc_helper.h" | ||
33 | |||
34 | #include "exynos_drm_drv.h" | ||
35 | #include "exynos_drm_fb.h" | ||
36 | #include "exynos_drm_buf.h" | ||
37 | |||
38 | #define MAX_CONNECTOR 4 | ||
39 | #define PREFERRED_BPP 32 | ||
40 | |||
41 | #define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\ | ||
42 | drm_fb_helper) | ||
43 | |||
44 | struct exynos_drm_fbdev { | ||
45 | struct drm_fb_helper drm_fb_helper; | ||
46 | struct drm_framebuffer *fb; | ||
47 | }; | ||
48 | |||
49 | static int exynos_drm_fbdev_set_par(struct fb_info *info) | ||
50 | { | ||
51 | struct fb_var_screeninfo *var = &info->var; | ||
52 | |||
53 | switch (var->bits_per_pixel) { | ||
54 | case 32: | ||
55 | case 24: | ||
56 | case 18: | ||
57 | case 16: | ||
58 | case 12: | ||
59 | info->fix.visual = FB_VISUAL_TRUECOLOR; | ||
60 | break; | ||
61 | case 1: | ||
62 | info->fix.visual = FB_VISUAL_MONO01; | ||
63 | break; | ||
64 | default: | ||
65 | info->fix.visual = FB_VISUAL_PSEUDOCOLOR; | ||
66 | break; | ||
67 | } | ||
68 | |||
69 | info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8; | ||
70 | |||
71 | return drm_fb_helper_set_par(info); | ||
72 | } | ||
73 | |||
74 | |||
75 | static struct fb_ops exynos_drm_fb_ops = { | ||
76 | .owner = THIS_MODULE, | ||
77 | .fb_fillrect = cfb_fillrect, | ||
78 | .fb_copyarea = cfb_copyarea, | ||
79 | .fb_imageblit = cfb_imageblit, | ||
80 | .fb_check_var = drm_fb_helper_check_var, | ||
81 | .fb_set_par = exynos_drm_fbdev_set_par, | ||
82 | .fb_blank = drm_fb_helper_blank, | ||
83 | .fb_pan_display = drm_fb_helper_pan_display, | ||
84 | .fb_setcmap = drm_fb_helper_setcmap, | ||
85 | }; | ||
86 | |||
87 | static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, | ||
88 | struct drm_framebuffer *fb, | ||
89 | unsigned int fb_width, | ||
90 | unsigned int fb_height) | ||
91 | { | ||
92 | struct fb_info *fbi = helper->fbdev; | ||
93 | struct drm_device *dev = helper->dev; | ||
94 | struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); | ||
95 | struct exynos_drm_buf_entry *entry; | ||
96 | unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); | ||
97 | unsigned long offset; | ||
98 | |||
99 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
100 | |||
101 | exynos_fb->fb = fb; | ||
102 | |||
103 | drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); | ||
104 | drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); | ||
105 | |||
106 | entry = exynos_drm_fb_get_buf(fb); | ||
107 | if (!entry) { | ||
108 | DRM_LOG_KMS("entry is null.\n"); | ||
109 | return -EFAULT; | ||
110 | } | ||
111 | |||
112 | offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); | ||
113 | offset += fbi->var.yoffset * fb->pitch; | ||
114 | |||
115 | dev->mode_config.fb_base = entry->paddr; | ||
116 | fbi->screen_base = entry->vaddr + offset; | ||
117 | fbi->fix.smem_start = entry->paddr + offset; | ||
118 | fbi->screen_size = size; | ||
119 | fbi->fix.smem_len = size; | ||
120 | |||
121 | return 0; | ||
122 | } | ||
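/*
 * worked example (values assumed, not from this patch): panning to
 * xoffset = 8, yoffset = 4 on a 32bpp framebuffer with a 4096-byte
 * pitch gives offset = 8 * (32 >> 3) + 4 * 4096 = 16416, so both
 * screen_base and smem_start end up 16416 bytes past the start of
 * the allocated buffer.
 */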
123 | |||
124 | static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, | ||
125 | struct drm_fb_helper_surface_size *sizes) | ||
126 | { | ||
127 | struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper); | ||
128 | struct drm_device *dev = helper->dev; | ||
129 | struct fb_info *fbi; | ||
130 | struct drm_mode_fb_cmd mode_cmd = { 0 }; | ||
131 | struct platform_device *pdev = dev->platformdev; | ||
132 | int ret; | ||
133 | |||
134 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
135 | |||
136 | DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", | ||
137 | sizes->surface_width, sizes->surface_height, | ||
138 | sizes->surface_bpp); | ||
139 | |||
140 | mode_cmd.width = sizes->surface_width; | ||
141 | mode_cmd.height = sizes->surface_height; | ||
142 | mode_cmd.bpp = sizes->surface_bpp; | ||
143 | mode_cmd.depth = sizes->surface_depth; | ||
144 | |||
145 | mutex_lock(&dev->struct_mutex); | ||
146 | |||
147 | fbi = framebuffer_alloc(0, &pdev->dev); | ||
148 | if (!fbi) { | ||
149 | DRM_ERROR("failed to allocate fb info.\n"); | ||
150 | ret = -ENOMEM; | ||
151 | goto out; | ||
152 | } | ||
153 | |||
154 | exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd); | ||
155 | if (IS_ERR_OR_NULL(exynos_fbdev->fb)) { | ||
156 | DRM_ERROR("failed to create drm framebuffer.\n"); | ||
157 | ret = PTR_ERR(exynos_fbdev->fb); | ||
158 | goto out; | ||
159 | } | ||
160 | |||
161 | helper->fb = exynos_fbdev->fb; | ||
162 | helper->fbdev = fbi; | ||
163 | |||
164 | fbi->par = helper; | ||
165 | fbi->flags = FBINFO_FLAG_DEFAULT; | ||
166 | fbi->fbops = &exynos_drm_fb_ops; | ||
167 | |||
168 | ret = fb_alloc_cmap(&fbi->cmap, 256, 0); | ||
169 | if (ret) { | ||
170 | DRM_ERROR("failed to allocate cmap.\n"); | ||
171 | goto out; | ||
172 | } | ||
173 | |||
174 | ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, | ||
175 | sizes->fb_height); | ||
176 | if (ret < 0) | ||
177 | fb_dealloc_cmap(&fbi->cmap); | ||
178 | |||
179 | /* | ||
180 | * on failure, all resources allocated above are released by | ||
181 | * drm_mode_config_cleanup(), because drm_load() was called prior | ||
182 | * to any specific driver such as the fimd or hdmi driver. | ||
183 | */ | ||
184 | out: | ||
185 | mutex_unlock(&dev->struct_mutex); | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static bool | ||
190 | exynos_drm_fbdev_is_samefb(struct drm_framebuffer *fb, | ||
191 | struct drm_fb_helper_surface_size *sizes) | ||
192 | { | ||
193 | if (fb->width != sizes->surface_width) | ||
194 | return false; | ||
195 | if (fb->height != sizes->surface_height) | ||
196 | return false; | ||
197 | if (fb->bits_per_pixel != sizes->surface_bpp) | ||
198 | return false; | ||
199 | if (fb->depth != sizes->surface_depth) | ||
200 | return false; | ||
201 | |||
202 | return true; | ||
203 | } | ||
204 | |||
205 | static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper, | ||
206 | struct drm_fb_helper_surface_size *sizes) | ||
207 | { | ||
208 | struct drm_device *dev = helper->dev; | ||
209 | struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper); | ||
210 | struct drm_framebuffer *fb = exynos_fbdev->fb; | ||
211 | struct drm_mode_fb_cmd mode_cmd = { 0 }; | ||
212 | |||
213 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
214 | |||
215 | if (helper->fb != fb) { | ||
216 | DRM_ERROR("drm framebuffer is different\n"); | ||
217 | return -EINVAL; | ||
218 | } | ||
219 | |||
220 | if (exynos_drm_fbdev_is_samefb(fb, sizes)) | ||
221 | return 0; | ||
222 | |||
223 | mode_cmd.width = sizes->surface_width; | ||
224 | mode_cmd.height = sizes->surface_height; | ||
225 | mode_cmd.bpp = sizes->surface_bpp; | ||
226 | mode_cmd.depth = sizes->surface_depth; | ||
227 | |||
228 | if (fb->funcs->destroy) | ||
229 | fb->funcs->destroy(fb); | ||
230 | |||
231 | exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd); | ||
232 | if (IS_ERR(exynos_fbdev->fb)) { | ||
233 | DRM_ERROR("failed to allocate fb.\n"); | ||
234 | return PTR_ERR(exynos_fbdev->fb); | ||
235 | } | ||
236 | |||
237 | helper->fb = exynos_fbdev->fb; | ||
238 | return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, | ||
239 | sizes->fb_height); | ||
240 | } | ||
241 | |||
242 | static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, | ||
243 | struct drm_fb_helper_surface_size *sizes) | ||
244 | { | ||
245 | int ret = 0; | ||
246 | |||
247 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
248 | |||
249 | if (!helper->fb) { | ||
250 | ret = exynos_drm_fbdev_create(helper, sizes); | ||
251 | if (ret < 0) { | ||
252 | DRM_ERROR("failed to create fbdev.\n"); | ||
253 | return ret; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * fb_helper expects a return value of 1 or more on success | ||
258 | * because register_framebuffer() must then be called. | ||
259 | */ | ||
260 | ret = 1; | ||
261 | } else { | ||
262 | ret = exynos_drm_fbdev_recreate(helper, sizes); | ||
263 | if (ret < 0) { | ||
264 | DRM_ERROR("failed to reconfigure fbdev\n"); | ||
265 | return ret; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | return ret; | ||
270 | } | ||
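/*
 * a rough sketch of why fb_probe distinguishes 1 from 0, based on the
 * drm_fb_helper core of this era (details may differ):
 *
 *	new_fb = fb_helper->funcs->fb_probe(fb_helper, &sizes);
 *	if (new_fb)
 *		register_framebuffer(fb_helper->fbdev);
 *	else
 *		drm_fb_helper_set_par(fb_helper->fbdev);
 *
 * returning 1 after a fresh create and 0 after a successful recreate
 * thus makes the core register the fb_info exactly once.
 */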
271 | |||
272 | static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { | ||
273 | .fb_probe = exynos_drm_fbdev_probe, | ||
274 | }; | ||
275 | |||
276 | int exynos_drm_fbdev_init(struct drm_device *dev) | ||
277 | { | ||
278 | struct exynos_drm_fbdev *fbdev; | ||
279 | struct exynos_drm_private *private = dev->dev_private; | ||
280 | struct drm_fb_helper *helper; | ||
281 | unsigned int num_crtc; | ||
282 | int ret; | ||
283 | |||
284 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
285 | |||
286 | if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) | ||
287 | return 0; | ||
288 | |||
289 | fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); | ||
290 | if (!fbdev) { | ||
291 | DRM_ERROR("failed to allocate drm fbdev.\n"); | ||
292 | return -ENOMEM; | ||
293 | } | ||
294 | |||
295 | private->fb_helper = helper = &fbdev->drm_fb_helper; | ||
296 | helper->funcs = &exynos_drm_fb_helper_funcs; | ||
297 | |||
298 | num_crtc = dev->mode_config.num_crtc; | ||
299 | |||
300 | ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR); | ||
301 | if (ret < 0) { | ||
302 | DRM_ERROR("failed to initialize drm fb helper.\n"); | ||
303 | goto err_init; | ||
304 | } | ||
305 | |||
306 | ret = drm_fb_helper_single_add_all_connectors(helper); | ||
307 | if (ret < 0) { | ||
308 | DRM_ERROR("failed to register drm_fb_helper_connector.\n"); | ||
309 | goto err_setup; | ||
310 | |||
311 | } | ||
312 | |||
313 | ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); | ||
314 | if (ret < 0) { | ||
315 | DRM_ERROR("failed to set up hw configuration.\n"); | ||
316 | goto err_setup; | ||
317 | } | ||
318 | |||
319 | return 0; | ||
320 | |||
321 | err_setup: | ||
322 | drm_fb_helper_fini(helper); | ||
323 | |||
324 | err_init: | ||
325 | private->fb_helper = NULL; | ||
326 | kfree(fbdev); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
330 | |||
331 | static void exynos_drm_fbdev_destroy(struct drm_device *dev, | ||
332 | struct drm_fb_helper *fb_helper) | ||
333 | { | ||
334 | struct drm_framebuffer *fb; | ||
335 | |||
336 | /* release drm framebuffer and real buffer */ | ||
337 | if (fb_helper->fb && fb_helper->fb->funcs) { | ||
338 | fb = fb_helper->fb; | ||
339 | if (fb && fb->funcs->destroy) | ||
340 | fb->funcs->destroy(fb); | ||
341 | } | ||
342 | |||
343 | /* release linux framebuffer */ | ||
344 | if (fb_helper->fbdev) { | ||
345 | struct fb_info *info; | ||
346 | int ret; | ||
347 | |||
348 | info = fb_helper->fbdev; | ||
349 | ret = unregister_framebuffer(info); | ||
350 | if (ret < 0) | ||
351 | DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); | ||
352 | |||
353 | if (info->cmap.len) | ||
354 | fb_dealloc_cmap(&info->cmap); | ||
355 | |||
356 | framebuffer_release(info); | ||
357 | } | ||
358 | |||
359 | drm_fb_helper_fini(fb_helper); | ||
360 | } | ||
361 | |||
362 | void exynos_drm_fbdev_fini(struct drm_device *dev) | ||
363 | { | ||
364 | struct exynos_drm_private *private = dev->dev_private; | ||
365 | struct exynos_drm_fbdev *fbdev; | ||
366 | |||
367 | if (!private || !private->fb_helper) | ||
368 | return; | ||
369 | |||
370 | fbdev = to_exynos_fbdev(private->fb_helper); | ||
371 | |||
372 | exynos_drm_fbdev_destroy(dev, private->fb_helper); | ||
373 | kfree(fbdev); | ||
374 | private->fb_helper = NULL; | ||
375 | } | ||
376 | |||
377 | void exynos_drm_fbdev_restore_mode(struct drm_device *dev) | ||
378 | { | ||
379 | struct exynos_drm_private *private = dev->dev_private; | ||
380 | |||
381 | if (!private || !private->fb_helper) | ||
382 | return; | ||
383 | |||
384 | drm_fb_helper_restore_fbdev_mode(private->fb_helper); | ||
385 | } | ||
386 | |||
387 | int exynos_drm_fbdev_reinit(struct drm_device *dev) | ||
388 | { | ||
389 | struct exynos_drm_private *private = dev->dev_private; | ||
390 | struct drm_fb_helper *fb_helper; | ||
391 | int ret; | ||
392 | |||
393 | if (!private) | ||
394 | return -EINVAL; | ||
395 | |||
396 | /* | ||
397 | * if all sub drivers were unloaded then num_connector is 0, | ||
398 | * so the framebuffers should also be destroyed at this point. | ||
399 | */ | ||
400 | if (!dev->mode_config.num_connector) { | ||
401 | exynos_drm_fbdev_fini(dev); | ||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | fb_helper = private->fb_helper; | ||
406 | |||
407 | if (fb_helper) { | ||
408 | drm_fb_helper_fini(fb_helper); | ||
409 | |||
410 | ret = drm_fb_helper_init(dev, fb_helper, | ||
411 | dev->mode_config.num_crtc, MAX_CONNECTOR); | ||
412 | if (ret < 0) { | ||
413 | DRM_ERROR("failed to initialize drm fb helper\n"); | ||
414 | return ret; | ||
415 | } | ||
416 | |||
417 | ret = drm_fb_helper_single_add_all_connectors(fb_helper); | ||
418 | if (ret < 0) { | ||
419 | DRM_ERROR("failed to add fb helper to connectors\n"); | ||
420 | goto err; | ||
421 | } | ||
422 | |||
423 | ret = drm_fb_helper_initial_config(fb_helper, PREFERRED_BPP); | ||
424 | if (ret < 0) { | ||
425 | DRM_ERROR("failed to set up hw configuration.\n"); | ||
426 | goto err; | ||
427 | } | ||
428 | } else { | ||
429 | /* | ||
430 | * if drm_load() failed when it was called prior to the | ||
431 | * specific drivers, fb_helper must be NULL, so this function | ||
432 | * should be called again to re-initialize and re-configure | ||
433 | * the fb helper. it means that this function has been | ||
434 | * called by one of the specific drivers. | ||
435 | */ | ||
436 | ret = exynos_drm_fbdev_init(dev); | ||
437 | } | ||
438 | |||
439 | return ret; | ||
440 | |||
441 | err: | ||
442 | /* | ||
443 | * if drm_load() failed when it was called prior to the specific | ||
444 | * drivers, fb_helper may be NULL, so check it. | ||
445 | */ | ||
446 | if (fb_helper) | ||
447 | drm_fb_helper_fini(fb_helper); | ||
448 | |||
449 | return ret; | ||
450 | } | ||
451 | |||
452 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
453 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
454 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
455 | MODULE_DESCRIPTION("Samsung SoC DRM FBDEV Driver"); | ||
456 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h new file mode 100644 index 000000000000..ccfce8a1a451 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the next | ||
17 | * paragraph) shall be included in all copies or substantial portions of the | ||
18 | * Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #ifndef _EXYNOS_DRM_FBDEV_H_ | ||
30 | #define _EXYNOS_DRM_FBDEV_H_ | ||
31 | |||
32 | int exynos_drm_fbdev_init(struct drm_device *dev); | ||
33 | int exynos_drm_fbdev_reinit(struct drm_device *dev); | ||
34 | void exynos_drm_fbdev_fini(struct drm_device *dev); | ||
35 | void exynos_drm_fbdev_restore_mode(struct drm_device *dev); | ||
36 | |||
37 | #endif | ||
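A plausible hook-up of this interface from the core driver's load and unload paths (a sketch only: exynos_drm_drv.c is outside this hunk, and the surrounding function bodies are illustrative):

	static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
	{
		/* ... mode config and subdrivers are set up first ... */
		exynos_drm_mode_config_init(dev);

		/* create the fbdev emulation once connectors are known */
		return exynos_drm_fbdev_init(dev);
	}

	static int exynos_drm_unload(struct drm_device *dev)
	{
		exynos_drm_fbdev_fini(dev);
		/* ... remaining teardown ... */
		return 0;
	}

exynos_drm_fbdev_reinit() would then be the entry point whenever a subdriver such as fimd or hdmi registers or unregisters and the connector set changes.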
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c new file mode 100644 index 000000000000..4659c88cdd9b --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -0,0 +1,811 @@ | |||
1 | /* exynos_drm_fimd.c | ||
2 | * | ||
3 | * Copyright (C) 2011 Samsung Electronics Co.Ltd | ||
4 | * Authors: | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Inki Dae <inki.dae@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | #include "drmP.h" | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/clk.h> | ||
20 | |||
21 | #include <drm/exynos_drm.h> | ||
22 | #include <plat/regs-fb-v4.h> | ||
23 | |||
24 | #include "exynos_drm_drv.h" | ||
25 | #include "exynos_drm_fbdev.h" | ||
26 | #include "exynos_drm_crtc.h" | ||
27 | |||
28 | /* | ||
29 | * FIMD stands for Fully Interactive Mobile Display. As a display | ||
30 | * controller, it transfers content drawn in memory to an LCD | ||
31 | * panel through display interfaces such as RGB or the | ||
32 | * CPU interface. | ||
33 | */ | ||
34 | |||
35 | /* position control register for hardware window 0, 2 ~ 4.*/ | ||
36 | #define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) | ||
37 | #define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) | ||
38 | /* size control register for hardware window 0. */ | ||
39 | #define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08) | ||
40 | /* alpha control register for hardware window 1 ~ 4. */ | ||
41 | #define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16) | ||
42 | /* size control register for hardware window 1 ~ 4. */ | ||
43 | #define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16) | ||
44 | |||
45 | #define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8) | ||
46 | #define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8) | ||
47 | #define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4) | ||
48 | |||
49 | /* color key control register for hardware window 1 ~ 4. */ | ||
50 | #define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8)) | ||
51 | /* color key value register for hardware window 1 ~ 4. */ | ||
52 | #define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8)) | ||
53 | |||
54 | /* FIMD has totally five hardware windows. */ | ||
55 | #define WINDOWS_NR 5 | ||
56 | |||
57 | #define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev)) | ||
58 | |||
59 | struct fimd_win_data { | ||
60 | unsigned int offset_x; | ||
61 | unsigned int offset_y; | ||
62 | unsigned int ovl_width; | ||
63 | unsigned int ovl_height; | ||
64 | unsigned int fb_width; | ||
65 | unsigned int fb_height; | ||
66 | unsigned int bpp; | ||
67 | dma_addr_t paddr; | ||
68 | void __iomem *vaddr; | ||
69 | unsigned int buf_offsize; | ||
70 | unsigned int line_size; /* bytes */ | ||
71 | }; | ||
72 | |||
73 | struct fimd_context { | ||
74 | struct exynos_drm_subdrv subdrv; | ||
75 | int irq; | ||
76 | struct drm_crtc *crtc; | ||
77 | struct clk *bus_clk; | ||
78 | struct clk *lcd_clk; | ||
79 | struct resource *regs_res; | ||
80 | void __iomem *regs; | ||
81 | struct fimd_win_data win_data[WINDOWS_NR]; | ||
82 | unsigned int clkdiv; | ||
83 | unsigned int default_win; | ||
84 | unsigned long irq_flags; | ||
85 | u32 vidcon0; | ||
86 | u32 vidcon1; | ||
87 | |||
88 | struct fb_videomode *timing; | ||
89 | }; | ||
90 | |||
91 | static bool fimd_display_is_connected(struct device *dev) | ||
92 | { | ||
93 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
94 | |||
95 | /* TODO. */ | ||
96 | |||
97 | return true; | ||
98 | } | ||
99 | |||
100 | static void *fimd_get_timing(struct device *dev) | ||
101 | { | ||
102 | struct fimd_context *ctx = get_fimd_context(dev); | ||
103 | |||
104 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
105 | |||
106 | return ctx->timing; | ||
107 | } | ||
108 | |||
109 | static int fimd_check_timing(struct device *dev, void *timing) | ||
110 | { | ||
111 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
112 | |||
113 | /* TODO. */ | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static int fimd_display_power_on(struct device *dev, int mode) | ||
119 | { | ||
120 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
121 | |||
122 | /* TODO. */ | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static struct exynos_drm_display fimd_display = { | ||
128 | .type = EXYNOS_DISPLAY_TYPE_LCD, | ||
129 | .is_connected = fimd_display_is_connected, | ||
130 | .get_timing = fimd_get_timing, | ||
131 | .check_timing = fimd_check_timing, | ||
132 | .power_on = fimd_display_power_on, | ||
133 | }; | ||
134 | |||
135 | static void fimd_commit(struct device *dev) | ||
136 | { | ||
137 | struct fimd_context *ctx = get_fimd_context(dev); | ||
138 | struct fb_videomode *timing = ctx->timing; | ||
139 | u32 val; | ||
140 | |||
141 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
142 | |||
143 | /* setup polarity values from machine code. */ | ||
144 | writel(ctx->vidcon1, ctx->regs + VIDCON1); | ||
145 | |||
146 | /* setup vertical timing values. */ | ||
147 | val = VIDTCON0_VBPD(timing->upper_margin - 1) | | ||
148 | VIDTCON0_VFPD(timing->lower_margin - 1) | | ||
149 | VIDTCON0_VSPW(timing->vsync_len - 1); | ||
150 | writel(val, ctx->regs + VIDTCON0); | ||
151 | |||
152 | /* setup horizontal timing values. */ | ||
153 | val = VIDTCON1_HBPD(timing->left_margin - 1) | | ||
154 | VIDTCON1_HFPD(timing->right_margin - 1) | | ||
155 | VIDTCON1_HSPW(timing->hsync_len - 1); | ||
156 | writel(val, ctx->regs + VIDTCON1); | ||
157 | |||
158 | /* setup horizontal and vertical display size. */ | ||
159 | val = VIDTCON2_LINEVAL(timing->yres - 1) | | ||
160 | VIDTCON2_HOZVAL(timing->xres - 1); | ||
161 | writel(val, ctx->regs + VIDTCON2); | ||
162 | |||
163 | /* setup clock source, clock divider, enable dma. */ | ||
164 | val = ctx->vidcon0; | ||
165 | val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); | ||
166 | |||
167 | if (ctx->clkdiv > 1) | ||
168 | val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR; | ||
169 | else | ||
170 | val &= ~VIDCON0_CLKDIR; /* 1:1 clock */ | ||
171 | |||
172 | /* | ||
173 | * register fields with the '_F' suffix are updated at vsync | ||
174 | * (the same time dma starts) | ||
175 | */ | ||
176 | val |= VIDCON0_ENVID | VIDCON0_ENVID_F; | ||
177 | writel(val, ctx->regs + VIDCON0); | ||
178 | } | ||
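/*
 * example with assumed timing values: upper_margin = 3,
 * lower_margin = 2 and vsync_len = 4 turn the VIDTCON0 write above
 * into VIDTCON0_VBPD(2) | VIDTCON0_VFPD(1) | VIDTCON0_VSPW(3);
 * each field is programmed as "lines - 1" per the hardware
 * convention.
 */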
179 | |||
180 | static int fimd_enable_vblank(struct device *dev) | ||
181 | { | ||
182 | struct fimd_context *ctx = get_fimd_context(dev); | ||
183 | u32 val; | ||
184 | |||
185 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
186 | |||
187 | if (!test_and_set_bit(0, &ctx->irq_flags)) { | ||
188 | val = readl(ctx->regs + VIDINTCON0); | ||
189 | |||
190 | val |= VIDINTCON0_INT_ENABLE; | ||
191 | val |= VIDINTCON0_INT_FRAME; | ||
192 | |||
193 | val &= ~VIDINTCON0_FRAMESEL0_MASK; | ||
194 | val |= VIDINTCON0_FRAMESEL0_VSYNC; | ||
195 | val &= ~VIDINTCON0_FRAMESEL1_MASK; | ||
196 | val |= VIDINTCON0_FRAMESEL1_NONE; | ||
197 | |||
198 | writel(val, ctx->regs + VIDINTCON0); | ||
199 | } | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static void fimd_disable_vblank(struct device *dev) | ||
205 | { | ||
206 | struct fimd_context *ctx = get_fimd_context(dev); | ||
207 | u32 val; | ||
208 | |||
209 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
210 | |||
211 | if (test_and_clear_bit(0, &ctx->irq_flags)) { | ||
212 | val = readl(ctx->regs + VIDINTCON0); | ||
213 | |||
214 | val &= ~VIDINTCON0_INT_FRAME; | ||
215 | val &= ~VIDINTCON0_INT_ENABLE; | ||
216 | |||
217 | writel(val, ctx->regs + VIDINTCON0); | ||
218 | } | ||
219 | } | ||
220 | |||
221 | static struct exynos_drm_manager_ops fimd_manager_ops = { | ||
222 | .commit = fimd_commit, | ||
223 | .enable_vblank = fimd_enable_vblank, | ||
224 | .disable_vblank = fimd_disable_vblank, | ||
225 | }; | ||
226 | |||
227 | static void fimd_win_mode_set(struct device *dev, | ||
228 | struct exynos_drm_overlay *overlay) | ||
229 | { | ||
230 | struct fimd_context *ctx = get_fimd_context(dev); | ||
231 | struct fimd_win_data *win_data; | ||
232 | unsigned long offset; | ||
233 | |||
234 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
235 | |||
236 | if (!overlay) { | ||
237 | dev_err(dev, "overlay is NULL\n"); | ||
238 | return; | ||
239 | } | ||
240 | |||
241 | offset = overlay->fb_x * (overlay->bpp >> 3); | ||
242 | offset += overlay->fb_y * overlay->pitch; | ||
243 | |||
244 | DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch); | ||
245 | |||
246 | win_data = &ctx->win_data[ctx->default_win]; | ||
247 | |||
248 | win_data->offset_x = overlay->crtc_x; | ||
249 | win_data->offset_y = overlay->crtc_y; | ||
250 | win_data->ovl_width = overlay->crtc_width; | ||
251 | win_data->ovl_height = overlay->crtc_height; | ||
252 | win_data->fb_width = overlay->fb_width; | ||
253 | win_data->fb_height = overlay->fb_height; | ||
254 | win_data->paddr = overlay->paddr + offset; | ||
255 | win_data->vaddr = overlay->vaddr + offset; | ||
256 | win_data->bpp = overlay->bpp; | ||
257 | win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * | ||
258 | (overlay->bpp >> 3); | ||
259 | win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); | ||
260 | |||
261 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", | ||
262 | win_data->offset_x, win_data->offset_y); | ||
263 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", | ||
264 | win_data->ovl_width, win_data->ovl_height); | ||
265 | DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", | ||
266 | (unsigned long)win_data->paddr, | ||
267 | (unsigned long)win_data->vaddr); | ||
268 | DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", | ||
269 | overlay->fb_width, overlay->crtc_width); | ||
270 | } | ||
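/*
 * worked example (values assumed): scanning a 1280-pixel-wide window
 * out of a 1920-pixel-wide 32bpp framebuffer gives
 * line_size = 1280 * 4 = 5120 bytes and
 * buf_offsize = (1920 - 1280) * 4 = 2560 bytes, i.e. after each
 * displayed line the dma engine skips 2560 bytes to reach the start
 * of the next framebuffer line.
 */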
271 | |||
272 | static void fimd_win_set_pixfmt(struct device *dev, unsigned int win) | ||
273 | { | ||
274 | struct fimd_context *ctx = get_fimd_context(dev); | ||
275 | struct fimd_win_data *win_data = &ctx->win_data[win]; | ||
276 | unsigned long val; | ||
277 | |||
278 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
279 | |||
280 | val = WINCONx_ENWIN; | ||
281 | |||
282 | switch (win_data->bpp) { | ||
283 | case 1: | ||
284 | val |= WINCON0_BPPMODE_1BPP; | ||
285 | val |= WINCONx_BITSWP; | ||
286 | val |= WINCONx_BURSTLEN_4WORD; | ||
287 | break; | ||
288 | case 2: | ||
289 | val |= WINCON0_BPPMODE_2BPP; | ||
290 | val |= WINCONx_BITSWP; | ||
291 | val |= WINCONx_BURSTLEN_8WORD; | ||
292 | break; | ||
293 | case 4: | ||
294 | val |= WINCON0_BPPMODE_4BPP; | ||
295 | val |= WINCONx_BITSWP; | ||
296 | val |= WINCONx_BURSTLEN_8WORD; | ||
297 | break; | ||
298 | case 8: | ||
299 | val |= WINCON0_BPPMODE_8BPP_PALETTE; | ||
300 | val |= WINCONx_BURSTLEN_8WORD; | ||
301 | val |= WINCONx_BYTSWP; | ||
302 | break; | ||
303 | case 16: | ||
304 | val |= WINCON0_BPPMODE_16BPP_565; | ||
305 | val |= WINCONx_HAWSWP; | ||
306 | val |= WINCONx_BURSTLEN_16WORD; | ||
307 | break; | ||
308 | case 24: | ||
309 | val |= WINCON0_BPPMODE_24BPP_888; | ||
310 | val |= WINCONx_WSWP; | ||
311 | val |= WINCONx_BURSTLEN_16WORD; | ||
312 | break; | ||
313 | case 32: | ||
314 | val |= WINCON1_BPPMODE_28BPP_A4888 | ||
315 | | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; | ||
316 | val |= WINCONx_WSWP; | ||
317 | val |= WINCONx_BURSTLEN_16WORD; | ||
318 | break; | ||
319 | default: | ||
320 | DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n"); | ||
321 | |||
322 | val |= WINCON0_BPPMODE_24BPP_888; | ||
323 | val |= WINCONx_WSWP; | ||
324 | val |= WINCONx_BURSTLEN_16WORD; | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp); | ||
329 | |||
330 | writel(val, ctx->regs + WINCON(win)); | ||
331 | } | ||
332 | |||
333 | static void fimd_win_set_colkey(struct device *dev, unsigned int win) | ||
334 | { | ||
335 | struct fimd_context *ctx = get_fimd_context(dev); | ||
336 | unsigned int keycon0 = 0, keycon1 = 0; | ||
337 | |||
338 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
339 | |||
340 | keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F | | ||
341 | WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0); | ||
342 | |||
343 | keycon1 = WxKEYCON1_COLVAL(0xffffffff); | ||
344 | |||
345 | writel(keycon0, ctx->regs + WKEYCON0_BASE(win)); | ||
346 | writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); | ||
347 | } | ||
348 | |||
349 | static void fimd_win_commit(struct device *dev) | ||
350 | { | ||
351 | struct fimd_context *ctx = get_fimd_context(dev); | ||
352 | struct fimd_win_data *win_data; | ||
353 | int win = ctx->default_win; | ||
354 | unsigned long val, alpha, size; | ||
355 | |||
356 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
357 | |||
358 | if (win < 0 || win >= WINDOWS_NR) | ||
359 | return; | ||
360 | |||
361 | win_data = &ctx->win_data[win]; | ||
362 | |||
363 | /* | ||
364 | * the SHADOWCON register controls when shadowed registers take effect. | ||
365 | * | ||
366 | * for example, if dma started while only the width field of a | ||
367 | * register had been set, the fimd hardware could malfunction; while | ||
368 | * a window is protected, register fields with the '_F' suffix are | ||
369 | * not updated even at vsync, but only once the window is | ||
370 | * unprotected again. | ||
371 | */ | ||
372 | |||
373 | /* protect windows */ | ||
374 | val = readl(ctx->regs + SHADOWCON); | ||
375 | val |= SHADOWCON_WINx_PROTECT(win); | ||
376 | writel(val, ctx->regs + SHADOWCON); | ||
377 | |||
378 | /* buffer start address */ | ||
379 | val = win_data->paddr; | ||
380 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); | ||
381 | |||
382 | /* buffer end address */ | ||
383 | size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); | ||
384 | val = win_data->paddr + size; | ||
385 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); | ||
386 | |||
387 | DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", | ||
388 | (unsigned long)win_data->paddr, val, size); | ||
389 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", | ||
390 | win_data->ovl_width, win_data->ovl_height); | ||
391 | |||
392 | /* buffer size */ | ||
393 | val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) | | ||
394 | VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size); | ||
395 | writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); | ||
396 | |||
397 | /* OSD position */ | ||
398 | val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) | | ||
399 | VIDOSDxA_TOPLEFT_Y(win_data->offset_y); | ||
400 | writel(val, ctx->regs + VIDOSD_A(win)); | ||
401 | |||
402 | val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x + | ||
403 | win_data->ovl_width - 1) | | ||
404 | VIDOSDxB_BOTRIGHT_Y(win_data->offset_y + | ||
405 | win_data->ovl_height - 1); | ||
406 | writel(val, ctx->regs + VIDOSD_B(win)); | ||
407 | |||
408 | DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", | ||
409 | win_data->offset_x, win_data->offset_y, | ||
410 | win_data->offset_x + win_data->ovl_width - 1, | ||
411 | win_data->offset_y + win_data->ovl_height - 1); | ||
412 | |||
413 | /* hardware window 0 doesn't support alpha channel. */ | ||
414 | if (win != 0) { | ||
415 | /* OSD alpha */ | ||
416 | alpha = VIDISD14C_ALPHA1_R(0xf) | | ||
417 | VIDISD14C_ALPHA1_G(0xf) | | ||
418 | VIDISD14C_ALPHA1_B(0xf); | ||
419 | |||
420 | writel(alpha, ctx->regs + VIDOSD_C(win)); | ||
421 | } | ||
422 | |||
423 | /* OSD size */ | ||
424 | if (win != 3 && win != 4) { | ||
425 | u32 offset = VIDOSD_D(win); | ||
426 | if (win == 0) | ||
427 | offset = VIDOSD_C_SIZE_W0; | ||
428 | val = win_data->ovl_width * win_data->ovl_height; | ||
429 | writel(val, ctx->regs + offset); | ||
430 | |||
431 | DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); | ||
432 | } | ||
433 | |||
434 | fimd_win_set_pixfmt(dev, win); | ||
435 | |||
436 | /* hardware window 0 doesn't support color key. */ | ||
437 | if (win != 0) | ||
438 | fimd_win_set_colkey(dev, win); | ||
439 | |||
440 | /* Enable DMA channel and unprotect windows */ | ||
441 | val = readl(ctx->regs + SHADOWCON); | ||
442 | val |= SHADOWCON_CHx_ENABLE(win); | ||
443 | val &= ~SHADOWCON_WINx_PROTECT(win); | ||
444 | writel(val, ctx->regs + SHADOWCON); | ||
445 | } | ||
446 | |||
447 | static void fimd_win_disable(struct device *dev) | ||
448 | { | ||
449 | struct fimd_context *ctx = get_fimd_context(dev); | ||
450 | struct fimd_win_data *win_data; | ||
451 | int win = ctx->default_win; | ||
452 | u32 val; | ||
453 | |||
454 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
455 | |||
456 | if (win < 0 || win >= WINDOWS_NR) | ||
457 | return; | ||
458 | |||
459 | win_data = &ctx->win_data[win]; | ||
460 | |||
461 | /* protect windows */ | ||
462 | val = readl(ctx->regs + SHADOWCON); | ||
463 | val |= SHADOWCON_WINx_PROTECT(win); | ||
464 | writel(val, ctx->regs + SHADOWCON); | ||
465 | |||
466 | /* wincon */ | ||
467 | val = readl(ctx->regs + WINCON(win)); | ||
468 | val &= ~WINCONx_ENWIN; | ||
469 | writel(val, ctx->regs + WINCON(win)); | ||
470 | |||
471 | /* unprotect windows */ | ||
472 | val = readl(ctx->regs + SHADOWCON); | ||
473 | val &= ~SHADOWCON_CHx_ENABLE(win); | ||
474 | val &= ~SHADOWCON_WINx_PROTECT(win); | ||
475 | writel(val, ctx->regs + SHADOWCON); | ||
476 | } | ||
477 | |||
478 | static struct exynos_drm_overlay_ops fimd_overlay_ops = { | ||
479 | .mode_set = fimd_win_mode_set, | ||
480 | .commit = fimd_win_commit, | ||
481 | .disable = fimd_win_disable, | ||
482 | }; | ||
483 | |||
484 | static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) | ||
485 | { | ||
486 | struct exynos_drm_private *dev_priv = drm_dev->dev_private; | ||
487 | struct drm_pending_vblank_event *e, *t; | ||
488 | struct timeval now; | ||
489 | unsigned long flags; | ||
490 | bool is_checked = false; | ||
491 | |||
492 | spin_lock_irqsave(&drm_dev->event_lock, flags); | ||
493 | |||
494 | list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list, | ||
495 | base.link) { | ||
496 | /* if the event's pipe is not the same as the crtc, ignore it. */ | ||
497 | if (crtc != e->pipe) | ||
498 | continue; | ||
499 | |||
500 | is_checked = true; | ||
501 | |||
502 | do_gettimeofday(&now); | ||
503 | e->event.sequence = 0; | ||
504 | e->event.tv_sec = now.tv_sec; | ||
505 | e->event.tv_usec = now.tv_usec; | ||
506 | |||
507 | list_move_tail(&e->base.link, &e->base.file_priv->event_list); | ||
508 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
509 | } | ||
510 | |||
511 | if (is_checked) | ||
512 | drm_vblank_put(drm_dev, crtc); | ||
513 | |||
514 | spin_unlock_irqrestore(&drm_dev->event_lock, flags); | ||
515 | } | ||
516 | |||
517 | static irqreturn_t fimd_irq_handler(int irq, void *dev_id) | ||
518 | { | ||
519 | struct fimd_context *ctx = (struct fimd_context *)dev_id; | ||
520 | struct exynos_drm_subdrv *subdrv = &ctx->subdrv; | ||
521 | struct drm_device *drm_dev = subdrv->drm_dev; | ||
522 | struct exynos_drm_manager *manager = &subdrv->manager; | ||
523 | u32 val; | ||
524 | |||
525 | val = readl(ctx->regs + VIDINTCON1); | ||
526 | |||
527 | if (val & VIDINTCON1_INT_FRAME) | ||
528 | /* VSYNC interrupt */ | ||
529 | writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); | ||
530 | |||
531 | drm_handle_vblank(drm_dev, manager->pipe); | ||
532 | fimd_finish_pageflip(drm_dev, manager->pipe); | ||
533 | |||
534 | return IRQ_HANDLED; | ||
535 | } | ||
536 | |||
537 | static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | ||
538 | { | ||
539 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
540 | |||
541 | /* | ||
542 | * enable drm irq mode. | ||
543 | * - with irq_enabled = 1, we can use the vblank feature. | ||
544 | * | ||
545 | * note that we do not use the drm irq handler but the | ||
546 | * driver's own one instead, because the drm framework | ||
547 | * supports only a single irq handler. | ||
548 | */ | ||
549 | drm_dev->irq_enabled = 1; | ||
550 | |||
551 | /* | ||
552 | * with vblank_disable_allowed = 1, the vblank interrupt will be | ||
553 | * disabled by the drm timer once the current process gives up | ||
554 | * ownership of the vblank event (i.e. calls drm_vblank_put()). | ||
555 | */ | ||
556 | drm_dev->vblank_disable_allowed = 1; | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static void fimd_subdrv_remove(struct drm_device *drm_dev) | ||
562 | { | ||
563 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
564 | |||
565 | /* TODO. */ | ||
566 | } | ||
567 | |||
568 | static int fimd_calc_clkdiv(struct fimd_context *ctx, | ||
569 | struct fb_videomode *timing) | ||
570 | { | ||
571 | unsigned long clk = clk_get_rate(ctx->lcd_clk); | ||
572 | u32 retrace; | ||
573 | u32 clkdiv; | ||
574 | u32 best_framerate = 0; | ||
575 | u32 framerate; | ||
576 | |||
577 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
578 | |||
579 | retrace = timing->left_margin + timing->hsync_len + | ||
580 | timing->right_margin + timing->xres; | ||
581 | retrace *= timing->upper_margin + timing->vsync_len + | ||
582 | timing->lower_margin + timing->yres; | ||
583 | |||
584 | /* default framerate is 60Hz */ | ||
585 | if (!timing->refresh) | ||
586 | timing->refresh = 60; | ||
587 | |||
588 | clk /= retrace; | ||
589 | |||
590 | for (clkdiv = 1; clkdiv < 0x100; clkdiv++) { | ||
591 | int tmp; | ||
592 | |||
593 | /* get best framerate */ | ||
594 | framerate = clk / clkdiv; | ||
595 | tmp = timing->refresh - framerate; | ||
596 | if (tmp < 0) { | ||
597 | best_framerate = framerate; | ||
598 | continue; | ||
599 | } else { | ||
600 | if (!best_framerate) | ||
601 | best_framerate = framerate; | ||
602 | else if (tmp < (best_framerate - framerate)) | ||
603 | best_framerate = framerate; | ||
604 | break; | ||
605 | } | ||
606 | } | ||
607 | |||
608 | return clkdiv; | ||
609 | } | ||
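/*
 * worked example (numbers assumed): with clk_get_rate() = 24 MHz, a
 * retrace area of 100000 pixels and refresh = 60, clk becomes 240
 * after the division. clkdiv 1..3 give framerates 240, 120 and 80,
 * all above the target (tmp < 0); at clkdiv = 4 the framerate is
 * exactly 60, so tmp is 0 and beats the best candidate of 80, and
 * the loop returns 4.
 */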
610 | |||
611 | static void fimd_clear_win(struct fimd_context *ctx, int win) | ||
612 | { | ||
613 | u32 val; | ||
614 | |||
615 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
616 | |||
617 | writel(0, ctx->regs + WINCON(win)); | ||
618 | writel(0, ctx->regs + VIDOSD_A(win)); | ||
619 | writel(0, ctx->regs + VIDOSD_B(win)); | ||
620 | writel(0, ctx->regs + VIDOSD_C(win)); | ||
621 | |||
622 | if (win == 1 || win == 2) | ||
623 | writel(0, ctx->regs + VIDOSD_D(win)); | ||
624 | |||
625 | val = readl(ctx->regs + SHADOWCON); | ||
626 | val &= ~SHADOWCON_WINx_PROTECT(win); | ||
627 | writel(val, ctx->regs + SHADOWCON); | ||
628 | } | ||
629 | |||
630 | static int __devinit fimd_probe(struct platform_device *pdev) | ||
631 | { | ||
632 | struct device *dev = &pdev->dev; | ||
633 | struct fimd_context *ctx; | ||
634 | struct exynos_drm_subdrv *subdrv; | ||
635 | struct exynos_drm_fimd_pdata *pdata; | ||
636 | struct fb_videomode *timing; | ||
637 | struct resource *res; | ||
638 | int win; | ||
639 | int ret = -EINVAL; | ||
640 | |||
641 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
642 | |||
643 | pdata = pdev->dev.platform_data; | ||
644 | if (!pdata) { | ||
645 | dev_err(dev, "no platform data specified\n"); | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | |||
649 | timing = &pdata->timing; | ||
650 | if (!timing) { | ||
651 | dev_err(dev, "timing is null.\n"); | ||
652 | return -EINVAL; | ||
653 | } | ||
654 | |||
655 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
656 | if (!ctx) | ||
657 | return -ENOMEM; | ||
658 | |||
659 | ctx->bus_clk = clk_get(dev, "fimd"); | ||
660 | if (IS_ERR(ctx->bus_clk)) { | ||
661 | dev_err(dev, "failed to get bus clock\n"); | ||
662 | ret = PTR_ERR(ctx->bus_clk); | ||
663 | goto err_clk_get; | ||
664 | } | ||
665 | |||
666 | clk_enable(ctx->bus_clk); | ||
667 | |||
668 | ctx->lcd_clk = clk_get(dev, "sclk_fimd"); | ||
669 | if (IS_ERR(ctx->lcd_clk)) { | ||
670 | dev_err(dev, "failed to get lcd clock\n"); | ||
671 | ret = PTR_ERR(ctx->lcd_clk); | ||
672 | goto err_bus_clk; | ||
673 | } | ||
674 | |||
675 | clk_enable(ctx->lcd_clk); | ||
676 | |||
677 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
678 | if (!res) { | ||
679 | dev_err(dev, "failed to find registers\n"); | ||
680 | ret = -ENOENT; | ||
681 | goto err_clk; | ||
682 | } | ||
683 | |||
684 | ctx->regs_res = request_mem_region(res->start, resource_size(res), | ||
685 | dev_name(dev)); | ||
686 | if (!ctx->regs_res) { | ||
687 | dev_err(dev, "failed to claim register region\n"); | ||
688 | ret = -ENOENT; | ||
689 | goto err_clk; | ||
690 | } | ||
691 | |||
692 | ctx->regs = ioremap(res->start, resource_size(res)); | ||
693 | if (!ctx->regs) { | ||
694 | dev_err(dev, "failed to map registers\n"); | ||
695 | ret = -ENXIO; | ||
696 | goto err_req_region_io; | ||
697 | } | ||
698 | |||
699 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
700 | if (!res) { | ||
701 | dev_err(dev, "irq request failed.\n"); | ||
702 | goto err_req_region_irq; | ||
703 | } | ||
704 | |||
705 | ctx->irq = res->start; | ||
706 | |||
707 | for (win = 0; win < WINDOWS_NR; win++) | ||
708 | fimd_clear_win(ctx, win); | ||
709 | |||
710 | ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx); | ||
711 | if (ret < 0) { | ||
712 | dev_err(dev, "irq request failed.\n"); | ||
713 | goto err_req_irq; | ||
714 | } | ||
715 | |||
716 | ctx->clkdiv = fimd_calc_clkdiv(ctx, timing); | ||
717 | ctx->vidcon0 = pdata->vidcon0; | ||
718 | ctx->vidcon1 = pdata->vidcon1; | ||
719 | ctx->default_win = pdata->default_win; | ||
720 | ctx->timing = timing; | ||
721 | |||
722 | timing->pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv; | ||
723 | |||
724 | DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n", | ||
725 | timing->pixclock, ctx->clkdiv); | ||
726 | |||
727 | subdrv = &ctx->subdrv; | ||
728 | |||
729 | subdrv->probe = fimd_subdrv_probe; | ||
730 | subdrv->remove = fimd_subdrv_remove; | ||
731 | subdrv->manager.pipe = -1; | ||
732 | subdrv->manager.ops = &fimd_manager_ops; | ||
733 | subdrv->manager.overlay_ops = &fimd_overlay_ops; | ||
734 | subdrv->manager.display = &fimd_display; | ||
735 | subdrv->manager.dev = dev; | ||
736 | |||
737 | platform_set_drvdata(pdev, ctx); | ||
738 | exynos_drm_subdrv_register(subdrv); | ||
739 | |||
740 | return 0; | ||
741 | |||
742 | err_req_irq: | ||
743 | err_req_region_irq: | ||
744 | iounmap(ctx->regs); | ||
745 | |||
746 | err_req_region_io: | ||
747 | release_resource(ctx->regs_res); | ||
748 | kfree(ctx->regs_res); | ||
749 | |||
750 | err_clk: | ||
751 | clk_disable(ctx->lcd_clk); | ||
752 | clk_put(ctx->lcd_clk); | ||
753 | |||
754 | err_bus_clk: | ||
755 | clk_disable(ctx->bus_clk); | ||
756 | clk_put(ctx->bus_clk); | ||
757 | |||
758 | err_clk_get: | ||
759 | kfree(ctx); | ||
760 | return ret; | ||
761 | } | ||
762 | |||
763 | static int __devexit fimd_remove(struct platform_device *pdev) | ||
764 | { | ||
765 | struct fimd_context *ctx = platform_get_drvdata(pdev); | ||
766 | |||
767 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
768 | |||
769 | exynos_drm_subdrv_unregister(&ctx->subdrv); | ||
770 | |||
771 | clk_disable(ctx->lcd_clk); | ||
772 | clk_disable(ctx->bus_clk); | ||
773 | clk_put(ctx->lcd_clk); | ||
774 | clk_put(ctx->bus_clk); | ||
775 | |||
776 | iounmap(ctx->regs); | ||
777 | release_resource(ctx->regs_res); | ||
778 | kfree(ctx->regs_res); | ||
779 | free_irq(ctx->irq, ctx); | ||
780 | |||
781 | kfree(ctx); | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static struct platform_driver fimd_driver = { | ||
787 | .probe = fimd_probe, | ||
788 | .remove = __devexit_p(fimd_remove), | ||
789 | .driver = { | ||
790 | .name = "exynos4-fb", | ||
791 | .owner = THIS_MODULE, | ||
792 | }, | ||
793 | }; | ||
794 | |||
795 | static int __init fimd_init(void) | ||
796 | { | ||
797 | return platform_driver_register(&fimd_driver); | ||
798 | } | ||
799 | |||
800 | static void __exit fimd_exit(void) | ||
801 | { | ||
802 | platform_driver_unregister(&fimd_driver); | ||
803 | } | ||
804 | |||
805 | module_init(fimd_init); | ||
806 | module_exit(fimd_exit); | ||
807 | |||
808 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
809 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
810 | MODULE_DESCRIPTION("Samsung DRM FIMD Driver"); | ||
811 | MODULE_LICENSE("GPL"); | ||
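The probe path above consumes a struct exynos_drm_fimd_pdata supplied by the board file. A minimal sketch of how a machine file might provide one (all field values and vidcon flags here are illustrative placeholders; the actual layout lives in <drm/exynos_drm.h>):

	static struct exynos_drm_fimd_pdata exynos4_fimd_pdata = {
		.timing = {
			.xres		= 1024,
			.yres		= 600,
			.hsync_len	= 40,
			.left_margin	= 79,
			.right_margin	= 200,
			.vsync_len	= 10,
			.upper_margin	= 10,
			.lower_margin	= 10,
			.refresh	= 60,
		},
		.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
		.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
		.default_win	= 0,
	};

The structure is then attached as platform data of the "exynos4-fb" platform device that this driver binds to.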
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c new file mode 100644 index 000000000000..a8e7a88906ed --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -0,0 +1,415 @@ | |||
1 | /* exynos_drm_gem.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Author: Inki Dae <inki.dae@samsung.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
23 | * OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #include "drmP.h" | ||
27 | #include "drm.h" | ||
28 | |||
29 | #include <drm/exynos_drm.h> | ||
30 | |||
31 | #include "exynos_drm_drv.h" | ||
32 | #include "exynos_drm_gem.h" | ||
33 | #include "exynos_drm_buf.h" | ||
34 | |||
35 | static unsigned int convert_to_vm_err_msg(int msg) | ||
36 | { | ||
37 | unsigned int out_msg; | ||
38 | |||
39 | switch (msg) { | ||
40 | case 0: | ||
41 | case -ERESTARTSYS: | ||
42 | case -EINTR: | ||
43 | out_msg = VM_FAULT_NOPAGE; | ||
44 | break; | ||
45 | |||
46 | case -ENOMEM: | ||
47 | out_msg = VM_FAULT_OOM; | ||
48 | break; | ||
49 | |||
50 | default: | ||
51 | out_msg = VM_FAULT_SIGBUS; | ||
52 | break; | ||
53 | } | ||
54 | |||
55 | return out_msg; | ||
56 | } | ||
57 | |||
58 | static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj) | ||
59 | { | ||
60 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
61 | |||
62 | return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; | ||
63 | } | ||
64 | |||
65 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, | ||
66 | struct drm_device *dev, unsigned int size, | ||
67 | unsigned int *handle) | ||
68 | { | ||
69 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
70 | struct exynos_drm_buf_entry *entry; | ||
71 | struct drm_gem_object *obj; | ||
72 | int ret; | ||
73 | |||
74 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
75 | |||
76 | size = roundup(size, PAGE_SIZE); | ||
77 | |||
78 | exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); | ||
79 | if (!exynos_gem_obj) { | ||
80 | DRM_ERROR("failed to allocate exynos gem object.\n"); | ||
81 | return ERR_PTR(-ENOMEM); | ||
82 | } | ||
83 | |||
84 | /* allocate the new buffer object and memory region. */ | ||
85 | entry = exynos_drm_buf_create(dev, size); | ||
86 | if (!entry) { | ||
87 | kfree(exynos_gem_obj); | ||
88 | return ERR_PTR(-ENOMEM); | ||
89 | } | ||
90 | |||
91 | exynos_gem_obj->entry = entry; | ||
92 | |||
93 | obj = &exynos_gem_obj->base; | ||
94 | |||
95 | ret = drm_gem_object_init(dev, obj, size); | ||
96 | if (ret < 0) { | ||
97 | DRM_ERROR("failed to initailize gem object.\n"); | ||
98 | goto err_obj_init; | ||
99 | } | ||
100 | |||
101 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); | ||
102 | |||
103 | ret = drm_gem_create_mmap_offset(obj); | ||
104 | if (ret < 0) { | ||
105 | DRM_ERROR("failed to allocate mmap offset.\n"); | ||
106 | goto err_create_mmap_offset; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * allocate an id in the idr table where the obj is registered; | ||
111 | * the handle holds the id that userspace sees. | ||
112 | */ | ||
113 | ret = drm_gem_handle_create(file_priv, obj, handle); | ||
114 | if (ret) | ||
115 | goto err_handle_create; | ||
116 | |||
117 | DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); | ||
118 | |||
119 | /* drop reference from allocate - handle holds it now. */ | ||
120 | drm_gem_object_unreference_unlocked(obj); | ||
121 | |||
122 | return exynos_gem_obj; | ||
123 | |||
124 | err_handle_create: | ||
125 | drm_gem_free_mmap_offset(obj); | ||
126 | |||
127 | err_create_mmap_offset: | ||
128 | drm_gem_object_release(obj); | ||
129 | |||
130 | err_obj_init: | ||
131 | exynos_drm_buf_destroy(dev, exynos_gem_obj->entry); | ||
132 | |||
133 | kfree(exynos_gem_obj); | ||
134 | |||
135 | return ERR_PTR(ret); | ||
136 | } | ||
137 | |||
138 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | ||
139 | struct drm_file *file_priv) | ||
140 | { | ||
141 | struct drm_exynos_gem_create *args = data; | ||
142 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
143 | |||
144 | DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); | ||
145 | |||
146 | exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, | ||
147 | &args->handle); | ||
148 | if (IS_ERR(exynos_gem_obj)) | ||
149 | return PTR_ERR(exynos_gem_obj); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | ||
155 | struct drm_file *file_priv) | ||
156 | { | ||
157 | struct drm_exynos_gem_map_off *args = data; | ||
158 | |||
159 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
160 | |||
161 | DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n", | ||
162 | args->handle, (unsigned long)args->offset); | ||
163 | |||
164 | if (!(dev->driver->driver_features & DRIVER_GEM)) { | ||
165 | DRM_ERROR("does not support GEM.\n"); | ||
166 | return -ENODEV; | ||
167 | } | ||
168 | |||
169 | return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, | ||
170 | &args->offset); | ||
171 | } | ||
172 | |||
173 | static int exynos_drm_gem_mmap_buffer(struct file *filp, | ||
174 | struct vm_area_struct *vma) | ||
175 | { | ||
176 | struct drm_gem_object *obj = filp->private_data; | ||
177 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | ||
178 | struct exynos_drm_buf_entry *entry; | ||
179 | unsigned long pfn, vm_size; | ||
180 | |||
181 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
182 | |||
183 | vma->vm_flags |= (VM_IO | VM_RESERVED); | ||
184 | |||
185 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
186 | vma->vm_file = filp; | ||
187 | |||
188 | vm_size = vma->vm_end - vma->vm_start; | ||
189 | /* | ||
190 | * an entry describes physically contiguous memory allocated by | ||
191 | * user request or at framebuffer creation. | ||
192 | */ | ||
193 | entry = exynos_gem_obj->entry; | ||
194 | |||
195 | /* check if user-requested size is valid. */ | ||
196 | if (vm_size > entry->size) | ||
197 | return -EINVAL; | ||
198 | |||
199 | /* | ||
200 | * get the page frame number of the physical memory to be | ||
201 | * mapped to user space. | ||
202 | */ | ||
203 | pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; | ||
204 | |||
205 | DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); | ||
206 | |||
207 | if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, | ||
208 | vma->vm_page_prot)) { | ||
209 | DRM_ERROR("failed to remap pfn range.\n"); | ||
210 | return -EAGAIN; | ||
211 | } | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static const struct file_operations exynos_drm_gem_fops = { | ||
217 | .mmap = exynos_drm_gem_mmap_buffer, | ||
218 | }; | ||
219 | |||
220 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
221 | struct drm_file *file_priv) | ||
222 | { | ||
223 | struct drm_exynos_gem_mmap *args = data; | ||
224 | struct drm_gem_object *obj; | ||
225 | unsigned int addr; | ||
226 | |||
227 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
228 | |||
229 | if (!(dev->driver->driver_features & DRIVER_GEM)) { | ||
230 | DRM_ERROR("does not support GEM.\n"); | ||
231 | return -ENODEV; | ||
232 | } | ||
233 | |||
234 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
235 | if (!obj) { | ||
236 | DRM_ERROR("failed to lookup gem object.\n"); | ||
237 | return -EINVAL; | ||
238 | } | ||
239 | |||
240 | obj->filp->f_op = &exynos_drm_gem_fops; | ||
241 | obj->filp->private_data = obj; | ||
242 | |||
243 | down_write(¤t->mm->mmap_sem); | ||
244 | addr = do_mmap(obj->filp, 0, args->size, | ||
245 | PROT_READ | PROT_WRITE, MAP_SHARED, 0); | ||
246 | up_write(¤t->mm->mmap_sem); | ||
247 | |||
248 | drm_gem_object_unreference_unlocked(obj); | ||
249 | |||
250 | if (IS_ERR((void *)addr)) | ||
251 | return PTR_ERR((void *)addr); | ||
252 | |||
253 | args->mapped = addr; | ||
254 | |||
255 | DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped); | ||
256 | |||
257 | return 0; | ||
258 | } | ||
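/*
 * userspace sketch (ioctl names assumed from the exynos uapi header,
 * not part of this patch): allocating and mapping a buffer with the
 * two exynos-specific ioctls handled above:
 *
 *	struct drm_exynos_gem_create create = { .size = 1 << 20 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create);
 *
 *	struct drm_exynos_gem_mmap map = {
 *		.handle	= create.handle,
 *		.size	= create.size,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &map);
 *	void *cpu = (void *)(unsigned long)map.mapped;
 *
 * unlike the generic dumb-buffer path, the mapping here is created in
 * the kernel via do_mmap() and only the address is handed back.
 */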
259 | |||
260 | int exynos_drm_gem_init_object(struct drm_gem_object *obj) | ||
261 | { | ||
262 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj) | ||
268 | { | ||
269 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
270 | |||
271 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
272 | |||
273 | DRM_DEBUG_KMS("handle count = %d\n", | ||
274 | atomic_read(&gem_obj->handle_count)); | ||
275 | |||
276 | if (gem_obj->map_list.map) | ||
277 | drm_gem_free_mmap_offset(gem_obj); | ||
278 | |||
279 | /* release file pointer to gem object. */ | ||
280 | drm_gem_object_release(gem_obj); | ||
281 | |||
282 | exynos_gem_obj = to_exynos_gem_obj(gem_obj); | ||
283 | |||
284 | exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); | ||
285 | |||
286 | kfree(exynos_gem_obj); | ||
287 | } | ||
288 | |||
289 | int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | ||
290 | struct drm_device *dev, struct drm_mode_create_dumb *args) | ||
291 | { | ||
292 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
293 | |||
294 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
295 | |||
296 | /* | ||
297 | * allocate memory to be used for the framebuffer. | ||
298 | * - this callback is invoked when a user application issues | ||
299 | * the DRM_IOCTL_MODE_CREATE_DUMB command. | ||
300 | */ | ||
301 | |||
302 | args->pitch = args->width * args->bpp >> 3; | ||
303 | args->size = args->pitch * args->height; | ||
304 | |||
305 | exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, | ||
306 | &args->handle); | ||
307 | if (IS_ERR(exynos_gem_obj)) | ||
308 | return PTR_ERR(exynos_gem_obj); | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | ||
314 | struct drm_device *dev, uint32_t handle, uint64_t *offset) | ||
315 | { | ||
316 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
317 | struct drm_gem_object *obj; | ||
318 | |||
319 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
320 | |||
321 | mutex_lock(&dev->struct_mutex); | ||
322 | |||
323 | /* | ||
324 | * get the offset of the memory allocated for a drm framebuffer. | ||
325 | * - this callback is invoked when a user application issues | ||
326 | * the DRM_IOCTL_MODE_MAP_DUMB command. | ||
327 | */ | ||
328 | |||
329 | obj = drm_gem_object_lookup(dev, file_priv, handle); | ||
330 | if (!obj) { | ||
331 | DRM_ERROR("failed to lookup gem object.\n"); | ||
332 | mutex_unlock(&dev->struct_mutex); | ||
333 | return -EINVAL; | ||
334 | } | ||
335 | |||
336 | exynos_gem_obj = to_exynos_gem_obj(obj); | ||
337 | |||
338 | *offset = get_gem_mmap_offset(&exynos_gem_obj->base); | ||
339 | |||
340 | drm_gem_object_unreference(obj); | ||
341 | |||
342 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); | ||
343 | |||
344 | mutex_unlock(&dev->struct_mutex); | ||
345 | |||
346 | return 0; | ||
347 | } | ||
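/*
 * the generic dumb-buffer flow served by the two callbacks above
 * (sketch; fields per the standard drm uapi):
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024, .height = 600, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *
 *	void *fb = mmap(0, creq.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, mreq.offset);
 *
 * the final mmap() lands in exynos_drm_gem_mmap() below, which trades
 * VM_PFNMAP for VM_MIXEDMAP so that exynos_drm_gem_fault() can insert
 * pages with vm_insert_mixed().
 */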
348 | |||
349 | int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
350 | { | ||
351 | struct drm_gem_object *obj = vma->vm_private_data; | ||
352 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | ||
353 | struct drm_device *dev = obj->dev; | ||
354 | unsigned long pfn; | ||
355 | pgoff_t page_offset; | ||
356 | int ret; | ||
357 | |||
358 | page_offset = ((unsigned long)vmf->virtual_address - | ||
359 | vma->vm_start) >> PAGE_SHIFT; | ||
360 | |||
361 | mutex_lock(&dev->struct_mutex); | ||
362 | |||
363 | pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; | ||
364 | |||
365 | ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); | ||
366 | |||
367 | mutex_unlock(&dev->struct_mutex); | ||
368 | |||
369 | return convert_to_vm_err_msg(ret); | ||
370 | } | ||
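The fault-handler arithmetic in numbers: with PAGE_SHIFT = 12, a fault at vma->vm_start + 0x3000 gives page_offset = 3, so the pfn inserted is (entry->paddr >> 12) + 3 — the fourth page of the buffer. This simple addition is only valid because the backing memory is physically contiguous, so page N of the mapping is always paddr plus N pages.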
371 | |||
372 | int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | ||
373 | { | ||
374 | int ret; | ||
375 | |||
376 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
377 | |||
378 | /* set vm_area_struct. */ | ||
379 | ret = drm_gem_mmap(filp, vma); | ||
380 | if (ret < 0) { | ||
381 | DRM_ERROR("failed to mmap.\n"); | ||
382 | return ret; | ||
383 | } | ||
384 | |||
385 | vma->vm_flags &= ~VM_PFNMAP; | ||
386 | vma->vm_flags |= VM_MIXEDMAP; | ||
387 | |||
388 | return ret; | ||
389 | } | ||
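Clearing VM_PFNMAP and setting VM_MIXEDMAP after drm_gem_mmap() matters because drm_gem_mmap() marks the vma as a PFN map by default, while the vm_insert_mixed() call in exynos_drm_gem_fault() above requires a VM_MIXEDMAP vma.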
390 | |||
391 | |||
392 | int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, | ||
393 | struct drm_device *dev, unsigned int handle) | ||
394 | { | ||
395 | int ret; | ||
396 | |||
397 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
398 | |||
399 | /* | ||
400 | * obj->refcount and obj->handle_count are decreased, and | ||
401 | * once both reach 0, exynos_drm_gem_free_object() is | ||
402 | * called back to release the resources. | ||
403 | */ | ||
404 | ret = drm_gem_handle_delete(file_priv, handle); | ||
405 | if (ret < 0) { | ||
406 | DRM_ERROR("failed to delete drm_gem_handle.\n"); | ||
407 | return ret; | ||
408 | } | ||
409 | |||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
414 | MODULE_DESCRIPTION("Samsung SoC DRM GEM Module"); | ||
415 | MODULE_LICENSE("GPL"); | ||
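The three dumb-buffer callbacks above plug into the generic KMS dumb-buffer ioctls. As a rough sketch of the userspace sequence they serve — the device path, geometry, and lack of error handling are illustrative assumptions, not part of this patch:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>   /* libdrm; pulls in drm.h / drm_mode.h */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	void *fb;

	memset(&create, 0, sizeof(create));
	create.width  = 1920;
	create.height = 1080;
	create.bpp    = 32;
	/* served by exynos_drm_gem_dumb_create(): fills pitch, size, handle */
	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	/* served by exynos_drm_gem_dumb_map_offset(): returns the fake offset */
	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	fb = mmap(0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	/* ... draw into fb, attach create.handle to a framebuffer ... */
	munmap(fb, create.size);
	return 0;
}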
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h new file mode 100644 index 000000000000..e5fc0148277b --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -0,0 +1,107 @@ | |||
1 | /* exynos_drm_gem.h | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Author: Inki Dae <inki.dae@samsung.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
23 | * OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #ifndef _EXYNOS_DRM_GEM_H_ | ||
27 | #define _EXYNOS_DRM_GEM_H_ | ||
28 | |||
29 | #define to_exynos_gem_obj(x) container_of(x,\ | ||
30 | struct exynos_drm_gem_obj, base) | ||
31 | |||
32 | /* | ||
33 | * exynos drm buffer structure. | ||
34 | * | ||
35 | * @base: a gem object. | ||
36 | * - a new handle to this gem object is created | ||
37 | * by drm_gem_handle_create(). | ||
38 | * @entry: pointer to an exynos drm buffer entry object. | ||
39 | * - it describes the physically contiguous | ||
40 | * memory region allocated by user request | ||
41 | * or at framebuffer creation. | ||
42 | * | ||
43 | * P.S. this object is transferred to user space as kms_bo.handle, so | ||
44 | * the user can access the buffer through kms_bo.handle. | ||
45 | */ | ||
46 | struct exynos_drm_gem_obj { | ||
47 | struct drm_gem_object base; | ||
48 | struct exynos_drm_buf_entry *entry; | ||
49 | }; | ||
50 | |||
51 | /* create a new buffer and get a new gem handle. */ | ||
52 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, | ||
53 | struct drm_device *dev, unsigned int size, | ||
54 | unsigned int *handle); | ||
55 | |||
56 | /* | ||
57 | * request gem object creation and buffer allocation of a size | ||
58 | * calculated from framebuffer information such as width, | ||
59 | * height and bpp. | ||
60 | */ | ||
61 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | ||
62 | struct drm_file *file_priv); | ||
63 | |||
64 | /* get the buffer offset for mapping into user space. */ | ||
65 | int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | ||
66 | struct drm_file *file_priv); | ||
67 | |||
68 | /* unmap a buffer from user space. */ | ||
69 | int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data, | ||
70 | struct drm_file *file_priv); | ||
71 | |||
72 | /* initialize gem object. */ | ||
73 | int exynos_drm_gem_init_object(struct drm_gem_object *obj); | ||
74 | |||
75 | /* free gem object. */ | ||
76 | void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); | ||
77 | |||
78 | /* create memory region for drm framebuffer. */ | ||
79 | int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | ||
80 | struct drm_device *dev, struct drm_mode_create_dumb *args); | ||
81 | |||
82 | /* map memory region for drm framebuffer to user space. */ | ||
83 | int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | ||
84 | struct drm_device *dev, uint32_t handle, uint64_t *offset); | ||
85 | |||
86 | /* page fault handler: maps the faulting (virtual) address to physical memory. */ | ||
87 | int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
88 | |||
89 | /* | ||
90 | * mmap the physically contiguous memory that a gem object contains | ||
91 | * into user space. | ||
92 | */ | ||
93 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
94 | struct drm_file *file_priv); | ||
95 | |||
96 | /* set vm_flags; the vm attributes can be changed here if needed. */ | ||
97 | int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | ||
98 | |||
99 | /* | ||
100 | * destroy the allocated memory region. | ||
101 | * - the gem handle and the physical memory region pointed to by | ||
102 | * the gem object are released by drm_gem_handle_delete(). | ||
103 | */ | ||
104 | int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, | ||
105 | struct drm_device *dev, unsigned int handle); | ||
106 | |||
107 | #endif | ||
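to_exynos_gem_obj() above is the standard container_of() idiom: given a pointer to the embedded struct drm_gem_object, it recovers the enclosing exynos_drm_gem_obj. A self-contained userspace illustration of the same pattern, with stand-in types:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types, for illustration only. */
struct gem_object { int dummy; };
struct wrapper {
	struct gem_object base;
	int extra;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct wrapper w = { .extra = 42 };
	struct gem_object *base = &w.base;   /* what a callback receives */
	struct wrapper *back = container_of(base, struct wrapper, base);

	printf("%d\n", back->extra);         /* prints 42 */
	return 0;
}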
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index d3e8c540f778..1ca799a1e1fc 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c | |||
@@ -227,7 +227,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, | |||
227 | default: | 227 | default: |
228 | DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " | 228 | DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " |
229 | "slave %d.\n", | 229 | "slave %d.\n", |
230 | val, adapter->name,dvo->slave_addr); | 230 | val, adapter->name, dvo->slave_addr); |
231 | goto fail; | 231 | goto fail; |
232 | } | 232 | } |
233 | 233 | ||
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 7eaa94e4ff06..4a036600e806 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c | |||
@@ -111,7 +111,7 @@ static char *ch7xxx_get_id(uint8_t vid) | |||
111 | /** Reads an 8 bit register */ | 111 | /** Reads an 8 bit register */ |
112 | static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | 112 | static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) |
113 | { | 113 | { |
114 | struct ch7xxx_priv *ch7xxx= dvo->dev_priv; | 114 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; |
115 | struct i2c_adapter *adapter = dvo->i2c_bus; | 115 | struct i2c_adapter *adapter = dvo->i2c_bus; |
116 | u8 out_buf[2]; | 116 | u8 out_buf[2]; |
117 | u8 in_buf[2]; | 117 | u8 in_buf[2]; |
@@ -303,7 +303,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) | |||
303 | 303 | ||
304 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { | 304 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { |
305 | uint8_t val; | 305 | uint8_t val; |
306 | if ((i % 8) == 0 ) | 306 | if ((i % 8) == 0) |
307 | DRM_LOG_KMS("\n %02X: ", i); | 307 | DRM_LOG_KMS("\n %02X: ", i); |
308 | ch7xxx_readb(dvo, i, &val); | 308 | ch7xxx_readb(dvo, i, &val); |
309 | DRM_LOG_KMS("%02X ", val); | 309 | DRM_LOG_KMS("%02X ", val); |
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index a12ed9414cc7..04f2893d5e3c 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c | |||
@@ -344,8 +344,8 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, | |||
344 | (adjusted_mode->hdisplay - 1)) >> 2; | 344 | (adjusted_mode->hdisplay - 1)) >> 2; |
345 | y_ratio = (((mode->vdisplay - 1) << 16) / | 345 | y_ratio = (((mode->vdisplay - 1) << 16) / |
346 | (adjusted_mode->vdisplay - 1)) >> 2; | 346 | (adjusted_mode->vdisplay - 1)) >> 2; |
347 | ivch_write (dvo, VR42, x_ratio); | 347 | ivch_write(dvo, VR42, x_ratio); |
348 | ivch_write (dvo, VR41, y_ratio); | 348 | ivch_write(dvo, VR41, y_ratio); |
349 | } else { | 349 | } else { |
350 | vr01 &= ~VR01_PANEL_FIT_ENABLE; | 350 | vr01 &= ~VR01_PANEL_FIT_ENABLE; |
351 | vr40 &= ~VR40_CLOCK_GATING_ENABLE; | 351 | vr40 &= ~VR40_CLOCK_GATING_ENABLE; |
@@ -410,7 +410,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo) | |||
410 | } | 410 | } |
411 | } | 411 | } |
412 | 412 | ||
413 | struct intel_dvo_dev_ops ivch_ops= { | 413 | struct intel_dvo_dev_ops ivch_ops = { |
414 | .init = ivch_init, | 414 | .init = ivch_init, |
415 | .dpms = ivch_dpms, | 415 | .dpms = ivch_dpms, |
416 | .mode_valid = ivch_mode_valid, | 416 | .mode_valid = ivch_mode_valid, |
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index e4b4091df942..a0b13a6f619d 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c | |||
@@ -104,7 +104,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
104 | 104 | ||
105 | static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | 105 | static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) |
106 | { | 106 | { |
107 | struct sil164_priv *sil= dvo->dev_priv; | 107 | struct sil164_priv *sil = dvo->dev_priv; |
108 | struct i2c_adapter *adapter = dvo->i2c_bus; | 108 | struct i2c_adapter *adapter = dvo->i2c_bus; |
109 | uint8_t out_buf[2]; | 109 | uint8_t out_buf[2]; |
110 | struct i2c_msg msg = { | 110 | struct i2c_msg msg = { |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 8ab2855bb544..aa2cd3ec54aa 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
@@ -56,7 +56,7 @@ | |||
56 | #define TFP410_CTL_2_MDI (1<<0) | 56 | #define TFP410_CTL_2_MDI (1<<0) |
57 | 57 | ||
58 | #define TFP410_CTL_3 0x0A | 58 | #define TFP410_CTL_3 0x0A |
59 | #define TFP410_CTL_3_DK_MASK (0x7<<5) | 59 | #define TFP410_CTL_3_DK_MASK (0x7<<5) |
60 | #define TFP410_CTL_3_DK (1<<5) | 60 | #define TFP410_CTL_3_DK (1<<5) |
61 | #define TFP410_CTL_3_DKEN (1<<4) | 61 | #define TFP410_CTL_3_DKEN (1<<4) |
62 | #define TFP410_CTL_3_CTL_MASK (0x7<<1) | 62 | #define TFP410_CTL_3_CTL_MASK (0x7<<1) |
@@ -225,12 +225,12 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo, | |||
225 | struct drm_display_mode *mode, | 225 | struct drm_display_mode *mode, |
226 | struct drm_display_mode *adjusted_mode) | 226 | struct drm_display_mode *adjusted_mode) |
227 | { | 227 | { |
228 | /* As long as the basics are set up, since we don't have clock dependencies | 228 | /* As long as the basics are set up, since we don't have clock dependencies |
229 | * in the mode setup, we can just leave the registers alone and everything | 229 | * in the mode setup, we can just leave the registers alone and everything |
230 | * will work fine. | 230 | * will work fine. |
231 | */ | 231 | */ |
232 | /* don't do much */ | 232 | /* don't do much */ |
233 | return; | 233 | return; |
234 | } | 234 | } |
235 | 235 | ||
236 | /* set the tfp410 power state */ | 236 | /* set the tfp410 power state */ |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3c395a59da35..8e95d66800b0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -98,12 +98,12 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj) | |||
98 | 98 | ||
99 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj) | 99 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj) |
100 | { | 100 | { |
101 | switch (obj->tiling_mode) { | 101 | switch (obj->tiling_mode) { |
102 | default: | 102 | default: |
103 | case I915_TILING_NONE: return " "; | 103 | case I915_TILING_NONE: return " "; |
104 | case I915_TILING_X: return "X"; | 104 | case I915_TILING_X: return "X"; |
105 | case I915_TILING_Y: return "Y"; | 105 | case I915_TILING_Y: return "Y"; |
106 | } | 106 | } |
107 | } | 107 | } |
108 | 108 | ||
109 | static const char *cache_level_str(int type) | 109 | static const char *cache_level_str(int type) |
@@ -217,7 +217,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
217 | ++mappable_count; \ | 217 | ++mappable_count; \ |
218 | } \ | 218 | } \ |
219 | } \ | 219 | } \ |
220 | } while(0) | 220 | } while (0) |
221 | 221 | ||
222 | static int i915_gem_object_info(struct seq_file *m, void* data) | 222 | static int i915_gem_object_info(struct seq_file *m, void* data) |
223 | { | 223 | { |
@@ -1293,12 +1293,12 @@ i915_wedged_read(struct file *filp, | |||
1293 | char buf[80]; | 1293 | char buf[80]; |
1294 | int len; | 1294 | int len; |
1295 | 1295 | ||
1296 | len = snprintf(buf, sizeof (buf), | 1296 | len = snprintf(buf, sizeof(buf), |
1297 | "wedged : %d\n", | 1297 | "wedged : %d\n", |
1298 | atomic_read(&dev_priv->mm.wedged)); | 1298 | atomic_read(&dev_priv->mm.wedged)); |
1299 | 1299 | ||
1300 | if (len > sizeof (buf)) | 1300 | if (len > sizeof(buf)) |
1301 | len = sizeof (buf); | 1301 | len = sizeof(buf); |
1302 | 1302 | ||
1303 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | 1303 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); |
1304 | } | 1304 | } |
@@ -1314,7 +1314,7 @@ i915_wedged_write(struct file *filp, | |||
1314 | int val = 1; | 1314 | int val = 1; |
1315 | 1315 | ||
1316 | if (cnt > 0) { | 1316 | if (cnt > 0) { |
1317 | if (cnt > sizeof (buf) - 1) | 1317 | if (cnt > sizeof(buf) - 1) |
1318 | return -EINVAL; | 1318 | return -EINVAL; |
1319 | 1319 | ||
1320 | if (copy_from_user(buf, ubuf, cnt)) | 1320 | if (copy_from_user(buf, ubuf, cnt)) |
@@ -1357,11 +1357,11 @@ i915_max_freq_read(struct file *filp, | |||
1357 | char buf[80]; | 1357 | char buf[80]; |
1358 | int len; | 1358 | int len; |
1359 | 1359 | ||
1360 | len = snprintf(buf, sizeof (buf), | 1360 | len = snprintf(buf, sizeof(buf), |
1361 | "max freq: %d\n", dev_priv->max_delay * 50); | 1361 | "max freq: %d\n", dev_priv->max_delay * 50); |
1362 | 1362 | ||
1363 | if (len > sizeof (buf)) | 1363 | if (len > sizeof(buf)) |
1364 | len = sizeof (buf); | 1364 | len = sizeof(buf); |
1365 | 1365 | ||
1366 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | 1366 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); |
1367 | } | 1367 | } |
@@ -1378,7 +1378,7 @@ i915_max_freq_write(struct file *filp, | |||
1378 | int val = 1; | 1378 | int val = 1; |
1379 | 1379 | ||
1380 | if (cnt > 0) { | 1380 | if (cnt > 0) { |
1381 | if (cnt > sizeof (buf) - 1) | 1381 | if (cnt > sizeof(buf) - 1) |
1382 | return -EINVAL; | 1382 | return -EINVAL; |
1383 | 1383 | ||
1384 | if (copy_from_user(buf, ubuf, cnt)) | 1384 | if (copy_from_user(buf, ubuf, cnt)) |
@@ -1432,12 +1432,12 @@ i915_cache_sharing_read(struct file *filp, | |||
1432 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | 1432 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); |
1433 | mutex_unlock(&dev_priv->dev->struct_mutex); | 1433 | mutex_unlock(&dev_priv->dev->struct_mutex); |
1434 | 1434 | ||
1435 | len = snprintf(buf, sizeof (buf), | 1435 | len = snprintf(buf, sizeof(buf), |
1436 | "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> | 1436 | "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> |
1437 | GEN6_MBC_SNPCR_SHIFT); | 1437 | GEN6_MBC_SNPCR_SHIFT); |
1438 | 1438 | ||
1439 | if (len > sizeof (buf)) | 1439 | if (len > sizeof(buf)) |
1440 | len = sizeof (buf); | 1440 | len = sizeof(buf); |
1441 | 1441 | ||
1442 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | 1442 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); |
1443 | } | 1443 | } |
@@ -1455,7 +1455,7 @@ i915_cache_sharing_write(struct file *filp, | |||
1455 | int val = 1; | 1455 | int val = 1; |
1456 | 1456 | ||
1457 | if (cnt > 0) { | 1457 | if (cnt > 0) { |
1458 | if (cnt > sizeof (buf) - 1) | 1458 | if (cnt > sizeof(buf) - 1) |
1459 | return -EINVAL; | 1459 | return -EINVAL; |
1460 | 1460 | ||
1461 | if (copy_from_user(buf, ubuf, cnt)) | 1461 | if (copy_from_user(buf, ubuf, cnt)) |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8a3942c4f099..2eac955dee18 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -884,7 +884,7 @@ static int i915_get_bridge_dev(struct drm_device *dev) | |||
884 | { | 884 | { |
885 | struct drm_i915_private *dev_priv = dev->dev_private; | 885 | struct drm_i915_private *dev_priv = dev->dev_private; |
886 | 886 | ||
887 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); | 887 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
888 | if (!dev_priv->bridge_dev) { | 888 | if (!dev_priv->bridge_dev) { |
889 | DRM_ERROR("bridge device not found\n"); | 889 | DRM_ERROR("bridge device not found\n"); |
890 | return -1; | 890 | return -1; |
@@ -1730,10 +1730,10 @@ static DEFINE_SPINLOCK(mchdev_lock); | |||
1730 | */ | 1730 | */ |
1731 | unsigned long i915_read_mch_val(void) | 1731 | unsigned long i915_read_mch_val(void) |
1732 | { | 1732 | { |
1733 | struct drm_i915_private *dev_priv; | 1733 | struct drm_i915_private *dev_priv; |
1734 | unsigned long chipset_val, graphics_val, ret = 0; | 1734 | unsigned long chipset_val, graphics_val, ret = 0; |
1735 | 1735 | ||
1736 | spin_lock(&mchdev_lock); | 1736 | spin_lock(&mchdev_lock); |
1737 | if (!i915_mch_dev) | 1737 | if (!i915_mch_dev) |
1738 | goto out_unlock; | 1738 | goto out_unlock; |
1739 | dev_priv = i915_mch_dev; | 1739 | dev_priv = i915_mch_dev; |
@@ -1744,9 +1744,9 @@ unsigned long i915_read_mch_val(void) | |||
1744 | ret = chipset_val + graphics_val; | 1744 | ret = chipset_val + graphics_val; |
1745 | 1745 | ||
1746 | out_unlock: | 1746 | out_unlock: |
1747 | spin_unlock(&mchdev_lock); | 1747 | spin_unlock(&mchdev_lock); |
1748 | 1748 | ||
1749 | return ret; | 1749 | return ret; |
1750 | } | 1750 | } |
1751 | EXPORT_SYMBOL_GPL(i915_read_mch_val); | 1751 | EXPORT_SYMBOL_GPL(i915_read_mch_val); |
1752 | 1752 | ||
@@ -1757,10 +1757,10 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val); | |||
1757 | */ | 1757 | */ |
1758 | bool i915_gpu_raise(void) | 1758 | bool i915_gpu_raise(void) |
1759 | { | 1759 | { |
1760 | struct drm_i915_private *dev_priv; | 1760 | struct drm_i915_private *dev_priv; |
1761 | bool ret = true; | 1761 | bool ret = true; |
1762 | 1762 | ||
1763 | spin_lock(&mchdev_lock); | 1763 | spin_lock(&mchdev_lock); |
1764 | if (!i915_mch_dev) { | 1764 | if (!i915_mch_dev) { |
1765 | ret = false; | 1765 | ret = false; |
1766 | goto out_unlock; | 1766 | goto out_unlock; |
@@ -1771,9 +1771,9 @@ bool i915_gpu_raise(void) | |||
1771 | dev_priv->max_delay--; | 1771 | dev_priv->max_delay--; |
1772 | 1772 | ||
1773 | out_unlock: | 1773 | out_unlock: |
1774 | spin_unlock(&mchdev_lock); | 1774 | spin_unlock(&mchdev_lock); |
1775 | 1775 | ||
1776 | return ret; | 1776 | return ret; |
1777 | } | 1777 | } |
1778 | EXPORT_SYMBOL_GPL(i915_gpu_raise); | 1778 | EXPORT_SYMBOL_GPL(i915_gpu_raise); |
1779 | 1779 | ||
@@ -1785,10 +1785,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise); | |||
1785 | */ | 1785 | */ |
1786 | bool i915_gpu_lower(void) | 1786 | bool i915_gpu_lower(void) |
1787 | { | 1787 | { |
1788 | struct drm_i915_private *dev_priv; | 1788 | struct drm_i915_private *dev_priv; |
1789 | bool ret = true; | 1789 | bool ret = true; |
1790 | 1790 | ||
1791 | spin_lock(&mchdev_lock); | 1791 | spin_lock(&mchdev_lock); |
1792 | if (!i915_mch_dev) { | 1792 | if (!i915_mch_dev) { |
1793 | ret = false; | 1793 | ret = false; |
1794 | goto out_unlock; | 1794 | goto out_unlock; |
@@ -1799,9 +1799,9 @@ bool i915_gpu_lower(void) | |||
1799 | dev_priv->max_delay++; | 1799 | dev_priv->max_delay++; |
1800 | 1800 | ||
1801 | out_unlock: | 1801 | out_unlock: |
1802 | spin_unlock(&mchdev_lock); | 1802 | spin_unlock(&mchdev_lock); |
1803 | 1803 | ||
1804 | return ret; | 1804 | return ret; |
1805 | } | 1805 | } |
1806 | EXPORT_SYMBOL_GPL(i915_gpu_lower); | 1806 | EXPORT_SYMBOL_GPL(i915_gpu_lower); |
1807 | 1807 | ||
@@ -1812,10 +1812,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); | |||
1812 | */ | 1812 | */ |
1813 | bool i915_gpu_busy(void) | 1813 | bool i915_gpu_busy(void) |
1814 | { | 1814 | { |
1815 | struct drm_i915_private *dev_priv; | 1815 | struct drm_i915_private *dev_priv; |
1816 | bool ret = false; | 1816 | bool ret = false; |
1817 | 1817 | ||
1818 | spin_lock(&mchdev_lock); | 1818 | spin_lock(&mchdev_lock); |
1819 | if (!i915_mch_dev) | 1819 | if (!i915_mch_dev) |
1820 | goto out_unlock; | 1820 | goto out_unlock; |
1821 | dev_priv = i915_mch_dev; | 1821 | dev_priv = i915_mch_dev; |
@@ -1823,9 +1823,9 @@ bool i915_gpu_busy(void) | |||
1823 | ret = dev_priv->busy; | 1823 | ret = dev_priv->busy; |
1824 | 1824 | ||
1825 | out_unlock: | 1825 | out_unlock: |
1826 | spin_unlock(&mchdev_lock); | 1826 | spin_unlock(&mchdev_lock); |
1827 | 1827 | ||
1828 | return ret; | 1828 | return ret; |
1829 | } | 1829 | } |
1830 | EXPORT_SYMBOL_GPL(i915_gpu_busy); | 1830 | EXPORT_SYMBOL_GPL(i915_gpu_busy); |
1831 | 1831 | ||
@@ -1837,10 +1837,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy); | |||
1837 | */ | 1837 | */ |
1838 | bool i915_gpu_turbo_disable(void) | 1838 | bool i915_gpu_turbo_disable(void) |
1839 | { | 1839 | { |
1840 | struct drm_i915_private *dev_priv; | 1840 | struct drm_i915_private *dev_priv; |
1841 | bool ret = true; | 1841 | bool ret = true; |
1842 | 1842 | ||
1843 | spin_lock(&mchdev_lock); | 1843 | spin_lock(&mchdev_lock); |
1844 | if (!i915_mch_dev) { | 1844 | if (!i915_mch_dev) { |
1845 | ret = false; | 1845 | ret = false; |
1846 | goto out_unlock; | 1846 | goto out_unlock; |
@@ -1853,9 +1853,9 @@ bool i915_gpu_turbo_disable(void) | |||
1853 | ret = false; | 1853 | ret = false; |
1854 | 1854 | ||
1855 | out_unlock: | 1855 | out_unlock: |
1856 | spin_unlock(&mchdev_lock); | 1856 | spin_unlock(&mchdev_lock); |
1857 | 1857 | ||
1858 | return ret; | 1858 | return ret; |
1859 | } | 1859 | } |
1860 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | 1860 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); |
1861 | 1861 | ||
@@ -1948,7 +1948,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1948 | 1948 | ||
1949 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | 1949 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
1950 | 1950 | ||
1951 | dev_priv->mm.gtt_mapping = | 1951 | dev_priv->mm.gtt_mapping = |
1952 | io_mapping_create_wc(dev->agp->base, agp_size); | 1952 | io_mapping_create_wc(dev->agp->base, agp_size); |
1953 | if (dev_priv->mm.gtt_mapping == NULL) { | 1953 | if (dev_priv->mm.gtt_mapping == NULL) { |
1954 | ret = -EIO; | 1954 | ret = -EIO; |
@@ -2035,7 +2035,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2035 | spin_lock_init(&dev_priv->error_lock); | 2035 | spin_lock_init(&dev_priv->error_lock); |
2036 | spin_lock_init(&dev_priv->rps_lock); | 2036 | spin_lock_init(&dev_priv->rps_lock); |
2037 | 2037 | ||
2038 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 2038 | if (IS_IVYBRIDGE(dev)) |
2039 | dev_priv->num_pipe = 3; | ||
2040 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) | ||
2039 | dev_priv->num_pipe = 2; | 2041 | dev_priv->num_pipe = 2; |
2040 | else | 2042 | else |
2041 | dev_priv->num_pipe = 1; | 2043 | dev_priv->num_pipe = 1; |
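The three-pipe case added here for Ivybridge is consistent with the plane_to_crtc_mapping[] and pipe_to_crtc_mapping[] arrays growing from two to three entries in the i915_drv.h hunk further down.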
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f07e4252b708..4c8d681c2151 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -79,11 +79,11 @@ MODULE_PARM_DESC(lvds_downclock, | |||
79 | "Use panel (LVDS/eDP) downclocking for power savings " | 79 | "Use panel (LVDS/eDP) downclocking for power savings " |
80 | "(default: false)"); | 80 | "(default: false)"); |
81 | 81 | ||
82 | unsigned int i915_panel_use_ssc __read_mostly = 1; | 82 | unsigned int i915_panel_use_ssc __read_mostly = -1; |
83 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | 83 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
84 | MODULE_PARM_DESC(lvds_use_ssc, | 84 | MODULE_PARM_DESC(lvds_use_ssc, |
85 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " | 85 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " |
86 | "(default: true)"); | 86 | "(default: auto from VBT)"); |
87 | 87 | ||
88 | int i915_vbt_sdvo_panel_type __read_mostly = -1; | 88 | int i915_vbt_sdvo_panel_type __read_mostly = -1; |
89 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); | 89 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); |
@@ -294,7 +294,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); | |||
294 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 | 294 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
295 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 | 295 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
296 | 296 | ||
297 | void intel_detect_pch (struct drm_device *dev) | 297 | void intel_detect_pch(struct drm_device *dev) |
298 | { | 298 | { |
299 | struct drm_i915_private *dev_priv = dev->dev_private; | 299 | struct drm_i915_private *dev_priv = dev->dev_private; |
300 | struct pci_dev *pch; | 300 | struct pci_dev *pch; |
@@ -377,7 +377,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
377 | 377 | ||
378 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | 378 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) |
379 | { | 379 | { |
380 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) { | 380 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { |
381 | int loop = 500; | 381 | int loop = 500; |
382 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | 382 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
383 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | 383 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { |
@@ -471,6 +471,9 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
471 | error = i915_gem_init_ringbuffer(dev); | 471 | error = i915_gem_init_ringbuffer(dev); |
472 | mutex_unlock(&dev->struct_mutex); | 472 | mutex_unlock(&dev->struct_mutex); |
473 | 473 | ||
474 | if (HAS_PCH_SPLIT(dev)) | ||
475 | ironlake_init_pch_refclk(dev); | ||
476 | |||
474 | drm_mode_config_reset(dev); | 477 | drm_mode_config_reset(dev); |
475 | drm_irq_install(dev); | 478 | drm_irq_install(dev); |
476 | 479 | ||
@@ -770,12 +773,12 @@ static int i915_pm_poweroff(struct device *dev) | |||
770 | } | 773 | } |
771 | 774 | ||
772 | static const struct dev_pm_ops i915_pm_ops = { | 775 | static const struct dev_pm_ops i915_pm_ops = { |
773 | .suspend = i915_pm_suspend, | 776 | .suspend = i915_pm_suspend, |
774 | .resume = i915_pm_resume, | 777 | .resume = i915_pm_resume, |
775 | .freeze = i915_pm_freeze, | 778 | .freeze = i915_pm_freeze, |
776 | .thaw = i915_pm_thaw, | 779 | .thaw = i915_pm_thaw, |
777 | .poweroff = i915_pm_poweroff, | 780 | .poweroff = i915_pm_poweroff, |
778 | .restore = i915_pm_resume, | 781 | .restore = i915_pm_resume, |
779 | }; | 782 | }; |
780 | 783 | ||
781 | static struct vm_operations_struct i915_gem_vm_ops = { | 784 | static struct vm_operations_struct i915_gem_vm_ops = { |
@@ -895,3 +898,43 @@ module_exit(i915_exit); | |||
895 | MODULE_AUTHOR(DRIVER_AUTHOR); | 898 | MODULE_AUTHOR(DRIVER_AUTHOR); |
896 | MODULE_DESCRIPTION(DRIVER_DESC); | 899 | MODULE_DESCRIPTION(DRIVER_DESC); |
897 | MODULE_LICENSE("GPL and additional rights"); | 900 | MODULE_LICENSE("GPL and additional rights"); |
901 | |||
902 | /* We give fast paths for the really cool registers */ | ||
903 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | ||
904 | (((dev_priv)->info->gen >= 6) && \ | ||
905 | ((reg) < 0x40000) && \ | ||
906 | ((reg) != FORCEWAKE)) | ||
907 | |||
908 | #define __i915_read(x, y) \ | ||
909 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | ||
910 | u##x val = 0; \ | ||
911 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
912 | gen6_gt_force_wake_get(dev_priv); \ | ||
913 | val = read##y(dev_priv->regs + reg); \ | ||
914 | gen6_gt_force_wake_put(dev_priv); \ | ||
915 | } else { \ | ||
916 | val = read##y(dev_priv->regs + reg); \ | ||
917 | } \ | ||
918 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | ||
919 | return val; \ | ||
920 | } | ||
921 | |||
922 | __i915_read(8, b) | ||
923 | __i915_read(16, w) | ||
924 | __i915_read(32, l) | ||
925 | __i915_read(64, q) | ||
926 | #undef __i915_read | ||
927 | |||
928 | #define __i915_write(x, y) \ | ||
929 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | ||
930 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | ||
931 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
932 | __gen6_gt_wait_for_fifo(dev_priv); \ | ||
933 | } \ | ||
934 | write##y(val, dev_priv->regs + reg); \ | ||
935 | } | ||
936 | __i915_write(8, b) | ||
937 | __i915_write(16, w) | ||
938 | __i915_write(32, l) | ||
939 | __i915_write(64, q) | ||
940 | #undef __i915_write | ||
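For readability, this is what the __i915_read(32, l) instantiation above expands to once the preprocessor substitutes x = 32 and y = l (hand-expanded and reformatted; not literal patch content):

u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		/* Gen6+ register below 0x40000: hold force-wake for the read */
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}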
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7916bd97d5c1..06a37f4fd74b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -139,7 +139,6 @@ struct sdvo_device_mapping { | |||
139 | u8 slave_addr; | 139 | u8 slave_addr; |
140 | u8 dvo_wiring; | 140 | u8 dvo_wiring; |
141 | u8 i2c_pin; | 141 | u8 i2c_pin; |
142 | u8 i2c_speed; | ||
143 | u8 ddc_pin; | 142 | u8 ddc_pin; |
144 | }; | 143 | }; |
145 | 144 | ||
@@ -209,6 +208,8 @@ struct drm_i915_display_funcs { | |||
209 | struct drm_display_mode *adjusted_mode, | 208 | struct drm_display_mode *adjusted_mode, |
210 | int x, int y, | 209 | int x, int y, |
211 | struct drm_framebuffer *old_fb); | 210 | struct drm_framebuffer *old_fb); |
211 | void (*write_eld)(struct drm_connector *connector, | ||
212 | struct drm_crtc *crtc); | ||
212 | void (*fdi_link_train)(struct drm_crtc *crtc); | 213 | void (*fdi_link_train)(struct drm_crtc *crtc); |
213 | void (*init_clock_gating)(struct drm_device *dev); | 214 | void (*init_clock_gating)(struct drm_device *dev); |
214 | void (*init_pch_clock_gating)(struct drm_device *dev); | 215 | void (*init_pch_clock_gating)(struct drm_device *dev); |
@@ -226,26 +227,26 @@ struct drm_i915_display_funcs { | |||
226 | 227 | ||
227 | struct intel_device_info { | 228 | struct intel_device_info { |
228 | u8 gen; | 229 | u8 gen; |
229 | u8 is_mobile : 1; | 230 | u8 is_mobile:1; |
230 | u8 is_i85x : 1; | 231 | u8 is_i85x:1; |
231 | u8 is_i915g : 1; | 232 | u8 is_i915g:1; |
232 | u8 is_i945gm : 1; | 233 | u8 is_i945gm:1; |
233 | u8 is_g33 : 1; | 234 | u8 is_g33:1; |
234 | u8 need_gfx_hws : 1; | 235 | u8 need_gfx_hws:1; |
235 | u8 is_g4x : 1; | 236 | u8 is_g4x:1; |
236 | u8 is_pineview : 1; | 237 | u8 is_pineview:1; |
237 | u8 is_broadwater : 1; | 238 | u8 is_broadwater:1; |
238 | u8 is_crestline : 1; | 239 | u8 is_crestline:1; |
239 | u8 is_ivybridge : 1; | 240 | u8 is_ivybridge:1; |
240 | u8 has_fbc : 1; | 241 | u8 has_fbc:1; |
241 | u8 has_pipe_cxsr : 1; | 242 | u8 has_pipe_cxsr:1; |
242 | u8 has_hotplug : 1; | 243 | u8 has_hotplug:1; |
243 | u8 cursor_needs_physical : 1; | 244 | u8 cursor_needs_physical:1; |
244 | u8 has_overlay : 1; | 245 | u8 has_overlay:1; |
245 | u8 overlay_needs_physical : 1; | 246 | u8 overlay_needs_physical:1; |
246 | u8 supports_tv : 1; | 247 | u8 supports_tv:1; |
247 | u8 has_bsd_ring : 1; | 248 | u8 has_bsd_ring:1; |
248 | u8 has_blt_ring : 1; | 249 | u8 has_blt_ring:1; |
249 | }; | 250 | }; |
250 | 251 | ||
251 | enum no_fbc_reason { | 252 | enum no_fbc_reason { |
@@ -347,7 +348,6 @@ typedef struct drm_i915_private { | |||
347 | /* LVDS info */ | 348 | /* LVDS info */ |
348 | int backlight_level; /* restore backlight to this value */ | 349 | int backlight_level; /* restore backlight to this value */ |
349 | bool backlight_enabled; | 350 | bool backlight_enabled; |
350 | struct drm_display_mode *panel_fixed_mode; | ||
351 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | 351 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
352 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | 352 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
353 | 353 | ||
@@ -357,6 +357,7 @@ typedef struct drm_i915_private { | |||
357 | unsigned int lvds_vbt:1; | 357 | unsigned int lvds_vbt:1; |
358 | unsigned int int_crt_support:1; | 358 | unsigned int int_crt_support:1; |
359 | unsigned int lvds_use_ssc:1; | 359 | unsigned int lvds_use_ssc:1; |
360 | unsigned int display_clock_mode:1; | ||
360 | int lvds_ssc_freq; | 361 | int lvds_ssc_freq; |
361 | struct { | 362 | struct { |
362 | int rate; | 363 | int rate; |
@@ -672,10 +673,9 @@ typedef struct drm_i915_private { | |||
672 | unsigned int lvds_border_bits; | 673 | unsigned int lvds_border_bits; |
673 | /* Panel fitter placement and size for Ironlake+ */ | 674 | /* Panel fitter placement and size for Ironlake+ */ |
674 | u32 pch_pf_pos, pch_pf_size; | 675 | u32 pch_pf_pos, pch_pf_size; |
675 | int panel_t3, panel_t12; | ||
676 | 676 | ||
677 | struct drm_crtc *plane_to_crtc_mapping[2]; | 677 | struct drm_crtc *plane_to_crtc_mapping[3]; |
678 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 678 | struct drm_crtc *pipe_to_crtc_mapping[3]; |
679 | wait_queue_head_t pending_flip_queue; | 679 | wait_queue_head_t pending_flip_queue; |
680 | bool flip_pending_is_done; | 680 | bool flip_pending_is_done; |
681 | 681 | ||
@@ -759,19 +759,19 @@ struct drm_i915_gem_object { | |||
759 | * (has pending rendering), and is not set if it's on inactive (ready | 759 | * (has pending rendering), and is not set if it's on inactive (ready |
760 | * to be unbound). | 760 | * to be unbound). |
761 | */ | 761 | */ |
762 | unsigned int active : 1; | 762 | unsigned int active:1; |
763 | 763 | ||
764 | /** | 764 | /** |
765 | * This is set if the object has been written to since last bound | 765 | * This is set if the object has been written to since last bound |
766 | * to the GTT | 766 | * to the GTT |
767 | */ | 767 | */ |
768 | unsigned int dirty : 1; | 768 | unsigned int dirty:1; |
769 | 769 | ||
770 | /** | 770 | /** |
771 | * This is set if the object has been written to since the last | 771 | * This is set if the object has been written to since the last |
772 | * GPU flush. | 772 | * GPU flush. |
773 | */ | 773 | */ |
774 | unsigned int pending_gpu_write : 1; | 774 | unsigned int pending_gpu_write:1; |
775 | 775 | ||
776 | /** | 776 | /** |
777 | * Fence register bits (if any) for this object. Will be set | 777 | * Fence register bits (if any) for this object. Will be set |
@@ -780,18 +780,18 @@ struct drm_i915_gem_object { | |||
780 | * | 780 | * |
781 | * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) | 781 | * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) |
782 | */ | 782 | */ |
783 | signed int fence_reg : 5; | 783 | signed int fence_reg:5; |
784 | 784 | ||
785 | /** | 785 | /** |
786 | * Advice: are the backing pages purgeable? | 786 | * Advice: are the backing pages purgeable? |
787 | */ | 787 | */ |
788 | unsigned int madv : 2; | 788 | unsigned int madv:2; |
789 | 789 | ||
790 | /** | 790 | /** |
791 | * Current tiling mode for the object. | 791 | * Current tiling mode for the object. |
792 | */ | 792 | */ |
793 | unsigned int tiling_mode : 2; | 793 | unsigned int tiling_mode:2; |
794 | unsigned int tiling_changed : 1; | 794 | unsigned int tiling_changed:1; |
795 | 795 | ||
796 | /** How many users have pinned this object in GTT space. The following | 796 | /** How many users have pinned this object in GTT space. The following |
797 | * users can each hold at most one reference: pwrite/pread, pin_ioctl | 797 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
@@ -802,22 +802,22 @@ struct drm_i915_gem_object { | |||
802 | * | 802 | * |
803 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 | 803 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 |
804 | * bits with absolutely no headroom. So use 4 bits. */ | 804 | * bits with absolutely no headroom. So use 4 bits. */ |
805 | unsigned int pin_count : 4; | 805 | unsigned int pin_count:4; |
806 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf | 806 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
807 | 807 | ||
808 | /** | 808 | /** |
809 | * Is the object at the current location in the gtt mappable and | 809 | * Is the object at the current location in the gtt mappable and |
810 | * fenceable? Used to avoid costly recalculations. | 810 | * fenceable? Used to avoid costly recalculations. |
811 | */ | 811 | */ |
812 | unsigned int map_and_fenceable : 1; | 812 | unsigned int map_and_fenceable:1; |
813 | 813 | ||
814 | /** | 814 | /** |
815 | * Whether the current gtt mapping needs to be mappable (and isn't just | 815 | * Whether the current gtt mapping needs to be mappable (and isn't just |
816 | * mappable by accident). Track pin and fault separate for a more | 816 | * mappable by accident). Track pin and fault separate for a more |
817 | * accurate mappable working set. | 817 | * accurate mappable working set. |
818 | */ | 818 | */ |
819 | unsigned int fault_mappable : 1; | 819 | unsigned int fault_mappable:1; |
820 | unsigned int pin_mappable : 1; | 820 | unsigned int pin_mappable:1; |
821 | 821 | ||
822 | /* | 822 | /* |
823 | * Is the GPU currently using a fence to access this buffer, | 823 | * Is the GPU currently using a fence to access this buffer, |
@@ -1056,7 +1056,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | |||
1056 | void | 1056 | void |
1057 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 1057 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
1058 | 1058 | ||
1059 | void intel_enable_asle (struct drm_device *dev); | 1059 | void intel_enable_asle(struct drm_device *dev); |
1060 | 1060 | ||
1061 | #ifdef CONFIG_DEBUG_FS | 1061 | #ifdef CONFIG_DEBUG_FS |
1062 | extern void i915_destroy_error_state(struct drm_device *dev); | 1062 | extern void i915_destroy_error_state(struct drm_device *dev); |
@@ -1146,7 +1146,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv, | |||
1146 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, | 1146 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, |
1147 | uint32_t handle, uint64_t *offset); | 1147 | uint32_t handle, uint64_t *offset); |
1148 | int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, | 1148 | int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, |
1149 | uint32_t handle); | 1149 | uint32_t handle); |
1150 | /** | 1150 | /** |
1151 | * Returns true if seq1 is later than seq2. | 1151 | * Returns true if seq1 is later than seq2. |
1152 | */ | 1152 | */ |
@@ -1301,10 +1301,11 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | |||
1301 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1301 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1302 | extern void intel_disable_fbc(struct drm_device *dev); | 1302 | extern void intel_disable_fbc(struct drm_device *dev); |
1303 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1303 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1304 | extern void ironlake_init_pch_refclk(struct drm_device *dev); | ||
1304 | extern void ironlake_enable_rc6(struct drm_device *dev); | 1305 | extern void ironlake_enable_rc6(struct drm_device *dev); |
1305 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | 1306 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
1306 | extern void intel_detect_pch (struct drm_device *dev); | 1307 | extern void intel_detect_pch(struct drm_device *dev); |
1307 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | 1308 | extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); |
1308 | 1309 | ||
1309 | /* overlay */ | 1310 | /* overlay */ |
1310 | #ifdef CONFIG_DEBUG_FS | 1311 | #ifdef CONFIG_DEBUG_FS |
@@ -1354,18 +1355,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); | |||
1354 | ((reg) != FORCEWAKE)) | 1355 | ((reg) != FORCEWAKE)) |
1355 | 1356 | ||
1356 | #define __i915_read(x, y) \ | 1357 | #define __i915_read(x, y) \ |
1357 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1358 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); |
1358 | u##x val = 0; \ | ||
1359 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1360 | gen6_gt_force_wake_get(dev_priv); \ | ||
1361 | val = read##y(dev_priv->regs + reg); \ | ||
1362 | gen6_gt_force_wake_put(dev_priv); \ | ||
1363 | } else { \ | ||
1364 | val = read##y(dev_priv->regs + reg); \ | ||
1365 | } \ | ||
1366 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | ||
1367 | return val; \ | ||
1368 | } | ||
1369 | 1359 | ||
1370 | __i915_read(8, b) | 1360 | __i915_read(8, b) |
1371 | __i915_read(16, w) | 1361 | __i915_read(16, w) |
@@ -1374,13 +1364,8 @@ __i915_read(64, q) | |||
1374 | #undef __i915_read | 1364 | #undef __i915_read |
1375 | 1365 | ||
1376 | #define __i915_write(x, y) \ | 1366 | #define __i915_write(x, y) \ |
1377 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1367 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); |
1378 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | 1368 | |
1379 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1380 | __gen6_gt_wait_for_fifo(dev_priv); \ | ||
1381 | } \ | ||
1382 | write##y(val, dev_priv->regs + reg); \ | ||
1383 | } | ||
1384 | __i915_write(8, b) | 1369 | __i915_write(8, b) |
1385 | __i915_write(16, w) | 1370 | __i915_write(16, w) |
1386 | __i915_write(32, l) | 1371 | __i915_write(32, l) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a546a71fb060..6651c36b6e8a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -179,7 +179,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
179 | mutex_unlock(&dev->struct_mutex); | 179 | mutex_unlock(&dev->struct_mutex); |
180 | 180 | ||
181 | args->aper_size = dev_priv->mm.gtt_total; | 181 | args->aper_size = dev_priv->mm.gtt_total; |
182 | args->aper_available_size = args->aper_size -pinned; | 182 | args->aper_available_size = args->aper_size - pinned; |
183 | 183 | ||
184 | return 0; | 184 | return 0; |
185 | } | 185 | } |
@@ -195,6 +195,8 @@ i915_gem_create(struct drm_file *file, | |||
195 | u32 handle; | 195 | u32 handle; |
196 | 196 | ||
197 | size = roundup(size, PAGE_SIZE); | 197 | size = roundup(size, PAGE_SIZE); |
198 | if (size == 0) | ||
199 | return -EINVAL; | ||
198 | 200 | ||
199 | /* Allocate the new object */ | 201 | /* Allocate the new object */ |
200 | obj = i915_gem_alloc_object(dev, size); | 202 | obj = i915_gem_alloc_object(dev, size); |
@@ -800,11 +802,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, | |||
800 | if (IS_ERR(page)) | 802 | if (IS_ERR(page)) |
801 | return PTR_ERR(page); | 803 | return PTR_ERR(page); |
802 | 804 | ||
803 | vaddr = kmap_atomic(page, KM_USER0); | 805 | vaddr = kmap_atomic(page); |
804 | ret = __copy_from_user_inatomic(vaddr + page_offset, | 806 | ret = __copy_from_user_inatomic(vaddr + page_offset, |
805 | user_data, | 807 | user_data, |
806 | page_length); | 808 | page_length); |
807 | kunmap_atomic(vaddr, KM_USER0); | 809 | kunmap_atomic(vaddr); |
808 | 810 | ||
809 | set_page_dirty(page); | 811 | set_page_dirty(page); |
810 | mark_page_accessed(page); | 812 | mark_page_accessed(page); |
@@ -1265,74 +1267,6 @@ out: | |||
1265 | } | 1267 | } |
1266 | 1268 | ||
1267 | /** | 1269 | /** |
1268 | * i915_gem_create_mmap_offset - create a fake mmap offset for an object | ||
1269 | * @obj: obj in question | ||
1270 | * | ||
1271 | * GEM memory mapping works by handing back to userspace a fake mmap offset | ||
1272 | * it can use in a subsequent mmap(2) call. The DRM core code then looks | ||
1273 | * up the object based on the offset and sets up the various memory mapping | ||
1274 | * structures. | ||
1275 | * | ||
1276 | * This routine allocates and attaches a fake offset for @obj. | ||
1277 | */ | ||
1278 | static int | ||
1279 | i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) | ||
1280 | { | ||
1281 | struct drm_device *dev = obj->base.dev; | ||
1282 | struct drm_gem_mm *mm = dev->mm_private; | ||
1283 | struct drm_map_list *list; | ||
1284 | struct drm_local_map *map; | ||
1285 | int ret = 0; | ||
1286 | |||
1287 | /* Set the object up for mmap'ing */ | ||
1288 | list = &obj->base.map_list; | ||
1289 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | ||
1290 | if (!list->map) | ||
1291 | return -ENOMEM; | ||
1292 | |||
1293 | map = list->map; | ||
1294 | map->type = _DRM_GEM; | ||
1295 | map->size = obj->base.size; | ||
1296 | map->handle = obj; | ||
1297 | |||
1298 | /* Get a DRM GEM mmap offset allocated... */ | ||
1299 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | ||
1300 | obj->base.size / PAGE_SIZE, | ||
1301 | 0, 0); | ||
1302 | if (!list->file_offset_node) { | ||
1303 | DRM_ERROR("failed to allocate offset for bo %d\n", | ||
1304 | obj->base.name); | ||
1305 | ret = -ENOSPC; | ||
1306 | goto out_free_list; | ||
1307 | } | ||
1308 | |||
1309 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | ||
1310 | obj->base.size / PAGE_SIZE, | ||
1311 | 0); | ||
1312 | if (!list->file_offset_node) { | ||
1313 | ret = -ENOMEM; | ||
1314 | goto out_free_list; | ||
1315 | } | ||
1316 | |||
1317 | list->hash.key = list->file_offset_node->start; | ||
1318 | ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); | ||
1319 | if (ret) { | ||
1320 | DRM_ERROR("failed to add to map hash\n"); | ||
1321 | goto out_free_mm; | ||
1322 | } | ||
1323 | |||
1324 | return 0; | ||
1325 | |||
1326 | out_free_mm: | ||
1327 | drm_mm_put_block(list->file_offset_node); | ||
1328 | out_free_list: | ||
1329 | kfree(list->map); | ||
1330 | list->map = NULL; | ||
1331 | |||
1332 | return ret; | ||
1333 | } | ||
1334 | |||
1335 | /** | ||
1336 | * i915_gem_release_mmap - remove physical page mappings | 1270 | * i915_gem_release_mmap - remove physical page mappings |
1337 | * @obj: obj in question | 1271 | * @obj: obj in question |
1338 | * | 1272 | * |
@@ -1360,19 +1294,6 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) | |||
1360 | obj->fault_mappable = false; | 1294 | obj->fault_mappable = false; |
1361 | } | 1295 | } |
1362 | 1296 | ||
1363 | static void | ||
1364 | i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) | ||
1365 | { | ||
1366 | struct drm_device *dev = obj->base.dev; | ||
1367 | struct drm_gem_mm *mm = dev->mm_private; | ||
1368 | struct drm_map_list *list = &obj->base.map_list; | ||
1369 | |||
1370 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | ||
1371 | drm_mm_put_block(list->file_offset_node); | ||
1372 | kfree(list->map); | ||
1373 | list->map = NULL; | ||
1374 | } | ||
1375 | |||
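The two helpers removed above do not disappear: they move into the DRM core as drm_gem_create_mmap_offset() and drm_gem_free_mmap_offset(), which the hunks below switch i915_gem_mmap_gtt() and the object free path over to.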
1376 | static uint32_t | 1297 | static uint32_t |
1377 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | 1298 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
1378 | { | 1299 | { |
@@ -1485,7 +1406,7 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1485 | } | 1406 | } |
1486 | 1407 | ||
1487 | if (!obj->base.map_list.map) { | 1408 | if (!obj->base.map_list.map) { |
1488 | ret = i915_gem_create_mmap_offset(obj); | 1409 | ret = drm_gem_create_mmap_offset(&obj->base); |
1489 | if (ret) | 1410 | if (ret) |
1490 | goto out; | 1411 | goto out; |
1491 | } | 1412 | } |
@@ -1557,7 +1478,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | |||
1557 | obj->pages[i] = page; | 1478 | obj->pages[i] = page; |
1558 | } | 1479 | } |
1559 | 1480 | ||
1560 | if (obj->tiling_mode != I915_TILING_NONE) | 1481 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
1561 | i915_gem_object_do_bit_17_swizzle(obj); | 1482 | i915_gem_object_do_bit_17_swizzle(obj); |
1562 | 1483 | ||
1563 | return 0; | 1484 | return 0; |
@@ -1579,7 +1500,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |||
1579 | 1500 | ||
1580 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 1501 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1581 | 1502 | ||
1582 | if (obj->tiling_mode != I915_TILING_NONE) | 1503 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
1583 | i915_gem_object_save_bit_17_swizzle(obj); | 1504 | i915_gem_object_save_bit_17_swizzle(obj); |
1584 | 1505 | ||
1585 | if (obj->madv == I915_MADV_DONTNEED) | 1506 | if (obj->madv == I915_MADV_DONTNEED) |
@@ -1856,7 +1777,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
1856 | * lost bo to the inactive list. | 1777 | * lost bo to the inactive list. |
1857 | */ | 1778 | */ |
1858 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 1779 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
1859 | obj= list_first_entry(&dev_priv->mm.flushing_list, | 1780 | obj = list_first_entry(&dev_priv->mm.flushing_list, |
1860 | struct drm_i915_gem_object, | 1781 | struct drm_i915_gem_object, |
1861 | mm_list); | 1782 | mm_list); |
1862 | 1783 | ||
@@ -1922,7 +1843,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | |||
1922 | while (!list_empty(&ring->active_list)) { | 1843 | while (!list_empty(&ring->active_list)) { |
1923 | struct drm_i915_gem_object *obj; | 1844 | struct drm_i915_gem_object *obj; |
1924 | 1845 | ||
1925 | obj= list_first_entry(&ring->active_list, | 1846 | obj = list_first_entry(&ring->active_list, |
1926 | struct drm_i915_gem_object, | 1847 | struct drm_i915_gem_object, |
1927 | ring_list); | 1848 | ring_list); |
1928 | 1849 | ||
@@ -2272,14 +2193,8 @@ int | |||
2272 | i915_gpu_idle(struct drm_device *dev) | 2193 | i915_gpu_idle(struct drm_device *dev) |
2273 | { | 2194 | { |
2274 | drm_i915_private_t *dev_priv = dev->dev_private; | 2195 | drm_i915_private_t *dev_priv = dev->dev_private; |
2275 | bool lists_empty; | ||
2276 | int ret, i; | 2196 | int ret, i; |
2277 | 2197 | ||
2278 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | ||
2279 | list_empty(&dev_priv->mm.active_list)); | ||
2280 | if (lists_empty) | ||
2281 | return 0; | ||
2282 | |||
2283 | /* Flush everything onto the inactive list. */ | 2198 | /* Flush everything onto the inactive list. */ |
2284 | for (i = 0; i < I915_NUM_RINGS; i++) { | 2199 | for (i = 0; i < I915_NUM_RINGS; i++) { |
2285 | ret = i915_ring_idle(&dev_priv->ring[i]); | 2200 | ret = i915_ring_idle(&dev_priv->ring[i]); |
@@ -2882,7 +2797,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2882 | 2797 | ||
2883 | fenceable = | 2798 | fenceable = |
2884 | obj->gtt_space->size == fence_size && | 2799 | obj->gtt_space->size == fence_size && |
2885 | (obj->gtt_space->start & (fence_alignment -1)) == 0; | 2800 | (obj->gtt_space->start & (fence_alignment - 1)) == 0; |
2886 | 2801 | ||
2887 | mappable = | 2802 | mappable = |
2888 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; | 2803 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; |
@@ -3598,7 +3513,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3598 | */ | 3513 | */ |
3599 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 3514 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
3600 | if (request) | 3515 | if (request) |
3601 | ret = i915_add_request(obj->ring, NULL,request); | 3516 | ret = i915_add_request(obj->ring, NULL, request); |
3602 | else | 3517 | else |
3603 | ret = -ENOMEM; | 3518 | ret = -ENOMEM; |
3604 | } | 3519 | } |
@@ -3623,7 +3538,7 @@ int | |||
3623 | i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | 3538 | i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
3624 | struct drm_file *file_priv) | 3539 | struct drm_file *file_priv) |
3625 | { | 3540 | { |
3626 | return i915_gem_ring_throttle(dev, file_priv); | 3541 | return i915_gem_ring_throttle(dev, file_priv); |
3627 | } | 3542 | } |
3628 | 3543 | ||
3629 | int | 3544 | int |
@@ -3752,7 +3667,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) | |||
3752 | trace_i915_gem_object_destroy(obj); | 3667 | trace_i915_gem_object_destroy(obj); |
3753 | 3668 | ||
3754 | if (obj->base.map_list.map) | 3669 | if (obj->base.map_list.map) |
3755 | i915_gem_free_mmap_offset(obj); | 3670 | drm_gem_free_mmap_offset(&obj->base); |
3756 | 3671 | ||
3757 | drm_gem_object_release(&obj->base); | 3672 | drm_gem_object_release(&obj->base); |
3758 | i915_gem_info_remove_obj(dev_priv, obj->base.size); | 3673 | i915_gem_info_remove_obj(dev_priv, obj->base.size); |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 8da1899bd24f..cc93cac242d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -72,7 +72,7 @@ i915_verify_lists(struct drm_device *dev) | |||
72 | break; | 72 | break; |
73 | } else if (!obj->active || | 73 | } else if (!obj->active || |
74 | (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || | 74 | (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || |
75 | list_empty(&obj->gpu_write_list)){ | 75 | list_empty(&obj->gpu_write_list)) { |
76 | DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", | 76 | DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", |
77 | obj, | 77 | obj, |
78 | obj->active, | 78 | obj->active, |
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) | |||
157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
158 | int i; | 158 | int i; |
159 | 159 | ||
160 | backing_map = kmap_atomic(obj->pages[page], KM_USER0); | 160 | backing_map = kmap_atomic(obj->pages[page]); |
161 | 161 | ||
162 | if (backing_map == NULL) { | 162 | if (backing_map == NULL) { |
163 | DRM_ERROR("failed to map backing page\n"); | 163 | DRM_ERROR("failed to map backing page\n"); |
@@ -181,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) | |||
181 | } | 181 | } |
182 | } | 182 | } |
183 | } | 183 | } |
184 | kunmap_atomic(backing_map, KM_USER0); | 184 | kunmap_atomic(backing_map); |
185 | backing_map = NULL; | 185 | backing_map = NULL; |
186 | } | 186 | } |
187 | 187 | ||
188 | out: | 188 | out: |
189 | if (backing_map != NULL) | 189 | if (backing_map != NULL) |
190 | kunmap_atomic(backing_map, KM_USER0); | 190 | kunmap_atomic(backing_map); |
191 | iounmap(gtt_mapping); | 191 | iounmap(gtt_mapping); |
192 | 192 | ||
193 | /* give syslog time to catch up */ | 193 | /* give syslog time to catch up */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index da05a2692a75..ead5d00f91b0 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -122,7 +122,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
122 | goto found; | 122 | goto found; |
123 | } | 123 | } |
124 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 124 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
125 | if (! obj->base.write_domain || obj->pin_count) | 125 | if (!obj->base.write_domain || obj->pin_count) |
126 | continue; | 126 | continue; |
127 | 127 | ||
128 | if (mark_free(obj, &unwind_list)) | 128 | if (mark_free(obj, &unwind_list)) |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4934cf84c320..3693e83a97f3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -784,7 +784,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
784 | } | 784 | } |
785 | 785 | ||
786 | from->sync_seqno[idx] = seqno; | 786 | from->sync_seqno[idx] = seqno; |
787 | return intel_ring_sync(to, from, seqno - 1); | 787 | |
788 | return to->sync_to(to, from, seqno - 1); | ||
788 | } | 789 | } |
789 | 790 | ||
790 | static int | 791 | static int |
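The one-line change above replaces a direct intel_ring_sync() call with a per-ring sync_to hook, so each engine can program its own semaphore registers. A hedged sketch of the indirection; the struct here is illustrative, not the actual intel_ringbuffer.h layout:

	#include <linux/types.h>

	struct sketch_ring {
		const char *name;
		int (*sync_to)(struct sketch_ring *to,
			       struct sketch_ring *from,
			       u32 seqno);
	};

	static int sketch_sync_rings(struct sketch_ring *to,
				     struct sketch_ring *from,
				     u32 seqno)
	{
		if (to == from)
			return 0;	/* same engine: ordering is implicit */
		/* seqno - 1: wait for the request before the new one,
		 * mirroring the caller in the hunk above */
		return to->sync_to(to, from, seqno - 1);
	}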
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7a709cd8d543..6042c5e6d278 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -49,6 +49,28 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev, | |||
49 | } | 49 | } |
50 | } | 50 | } |
51 | 51 | ||
52 | static bool do_idling(struct drm_i915_private *dev_priv) | ||
53 | { | ||
54 | bool ret = dev_priv->mm.interruptible; | ||
55 | |||
56 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { | ||
57 | dev_priv->mm.interruptible = false; | ||
58 | if (i915_gpu_idle(dev_priv->dev)) { | ||
59 | DRM_ERROR("Couldn't idle GPU\n"); | ||
60 | /* Wait a bit, in hopes it avoids the hang */ | ||
61 | udelay(10); | ||
62 | } | ||
63 | } | ||
64 | |||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) | ||
69 | { | ||
70 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) | ||
71 | dev_priv->mm.interruptible = interruptible; | ||
72 | } | ||
73 | |||
52 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 74 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
53 | { | 75 | { |
54 | struct drm_i915_private *dev_priv = dev->dev_private; | 76 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -117,6 +139,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, | |||
117 | 139 | ||
118 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | 140 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
119 | { | 141 | { |
142 | struct drm_device *dev = obj->base.dev; | ||
143 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
144 | bool interruptible; | ||
145 | |||
146 | interruptible = do_idling(dev_priv); | ||
147 | |||
120 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | 148 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
121 | obj->base.size >> PAGE_SHIFT); | 149 | obj->base.size >> PAGE_SHIFT); |
122 | 150 | ||
@@ -124,4 +152,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | |||
124 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); | 152 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); |
125 | obj->sg_list = NULL; | 153 | obj->sg_list = NULL; |
126 | } | 154 | } |
155 | |||
156 | undo_idling(dev_priv, interruptible); | ||
127 | } | 157 | } |
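do_idling()/undo_idling() above form a save/restore pair: the caller records the prior interruptible mode, holds non-interruptible mode across the quirked unmap, and restores the old mode afterwards. The generic shape of that pattern, as a sketch:

	#include <linux/types.h>

	/* Flip a mode flag for a critical section and hand the previous
	 * value back so paired calls nest correctly. */
	static bool enter_noninterruptible(bool *interruptible)
	{
		bool old = *interruptible;

		*interruptible = false;
		return old;
	}

	static void leave_noninterruptible(bool *interruptible, bool old)
	{
		*interruptible = old;
	}

Returning the old value rather than unconditionally writing true is what makes the bracket safe when the mode was already false on entry.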
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 99c4faa59d8f..31d334d9d9da 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -92,7 +92,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
94 | 94 | ||
95 | if (INTEL_INFO(dev)->gen >= 5) { | 95 | if (INTEL_INFO(dev)->gen >= 6) { |
96 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
97 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
98 | } else if (IS_GEN5(dev)) { | ||
96 | /* On Ironlake whatever DRAM config, GPU always do | 99 | /* On Ironlake whatever DRAM config, GPU always do |
97 | * same swizzling setup. | 100 | * same swizzling setup. |
98 | */ | 101 | */ |
@@ -440,14 +443,9 @@ i915_gem_swizzle_page(struct page *page) | |||
440 | void | 443 | void |
441 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) | 444 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
442 | { | 445 | { |
443 | struct drm_device *dev = obj->base.dev; | ||
444 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
445 | int page_count = obj->base.size >> PAGE_SHIFT; | 446 | int page_count = obj->base.size >> PAGE_SHIFT; |
446 | int i; | 447 | int i; |
447 | 448 | ||
448 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | ||
449 | return; | ||
450 | |||
451 | if (obj->bit_17 == NULL) | 449 | if (obj->bit_17 == NULL) |
452 | return; | 450 | return; |
453 | 451 | ||
@@ -464,14 +462,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) | |||
464 | void | 462 | void |
465 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) | 463 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
466 | { | 464 | { |
467 | struct drm_device *dev = obj->base.dev; | ||
468 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
469 | int page_count = obj->base.size >> PAGE_SHIFT; | 465 | int page_count = obj->base.size >> PAGE_SHIFT; |
470 | int i; | 466 | int i; |
471 | 467 | ||
472 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | ||
473 | return; | ||
474 | |||
475 | if (obj->bit_17 == NULL) { | 468 | if (obj->bit_17 == NULL) { |
476 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | 469 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * |
477 | sizeof(long), GFP_KERNEL); | 470 | sizeof(long), GFP_KERNEL); |
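With the bit_6_swizzle_x check deleted from the two helpers above, gating moves to the call sites. A hedged sketch of the expected caller-side test; the predicate form is an assumption about the callers, not a quote of i915_gem.c:

	static bool sketch_needs_bit17_swizzle(struct drm_i915_gem_object *obj,
					       drm_i915_private_t *dev_priv)
	{
		/* bit-17 save/restore only matters when the platform swizzles
		 * on bit 17 and the object is actually tiled */
		return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		       obj->tiling_mode != I915_TILING_NONE;
	}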
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9cbb0cd8f46a..9ee2729fe5c6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -383,6 +383,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
383 | pm_iir = dev_priv->pm_iir; | 383 | pm_iir = dev_priv->pm_iir; |
384 | dev_priv->pm_iir = 0; | 384 | dev_priv->pm_iir = 0; |
385 | pm_imr = I915_READ(GEN6_PMIMR); | 385 | pm_imr = I915_READ(GEN6_PMIMR); |
386 | I915_WRITE(GEN6_PMIMR, 0); | ||
386 | spin_unlock_irq(&dev_priv->rps_lock); | 387 | spin_unlock_irq(&dev_priv->rps_lock); |
387 | 388 | ||
388 | if (!pm_iir) | 389 | if (!pm_iir) |
@@ -420,7 +421,6 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
420 | * an *extremely* unlikely race with gen6_rps_enable() that is prevented | 421 | * an *extremely* unlikely race with gen6_rps_enable() that is prevented |
421 | * by holding struct_mutex for the duration of the write. | 422 | * by holding struct_mutex for the duration of the write. |
422 | */ | 423 | */ |
423 | I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir); | ||
424 | mutex_unlock(&dev_priv->dev->struct_mutex); | 424 | mutex_unlock(&dev_priv->dev->struct_mutex); |
425 | } | 425 | } |
426 | 426 | ||
@@ -536,8 +536,9 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | |||
536 | unsigned long flags; | 536 | unsigned long flags; |
537 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | 537 | spin_lock_irqsave(&dev_priv->rps_lock, flags); |
538 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | 538 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); |
539 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
540 | dev_priv->pm_iir |= pm_iir; | 539 | dev_priv->pm_iir |= pm_iir; |
540 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); | ||
541 | POSTING_READ(GEN6_PMIMR); | ||
541 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | 542 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); |
542 | queue_work(dev_priv->wq, &dev_priv->rps_work); | 543 | queue_work(dev_priv->wq, &dev_priv->rps_work); |
543 | } | 544 | } |
@@ -649,8 +650,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
649 | unsigned long flags; | 650 | unsigned long flags; |
650 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | 651 | spin_lock_irqsave(&dev_priv->rps_lock, flags); |
651 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | 652 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); |
652 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
653 | dev_priv->pm_iir |= pm_iir; | 653 | dev_priv->pm_iir |= pm_iir; |
654 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); | ||
655 | POSTING_READ(GEN6_PMIMR); | ||
654 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | 656 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); |
655 | queue_work(dev_priv->wq, &dev_priv->rps_work); | 657 | queue_work(dev_priv->wq, &dev_priv->rps_work); |
656 | } | 658 | } |
@@ -711,7 +713,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
711 | 713 | ||
712 | page_count = src->base.size / PAGE_SIZE; | 714 | page_count = src->base.size / PAGE_SIZE; |
713 | 715 | ||
714 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); | 716 | dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC); |
715 | if (dst == NULL) | 717 | if (dst == NULL) |
716 | return NULL; | 718 | return NULL; |
717 | 719 | ||
@@ -1493,7 +1495,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | |||
1493 | 1495 | ||
1494 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1496 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1495 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | 1497 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? |
1496 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1498 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
1497 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1499 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1498 | 1500 | ||
1499 | return 0; | 1501 | return 0; |
@@ -1541,7 +1543,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe) | |||
1541 | 1543 | ||
1542 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1544 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1543 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1545 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
1544 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1546 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
1545 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1547 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1546 | } | 1548 | } |
1547 | 1549 | ||
@@ -1777,6 +1779,26 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1777 | POSTING_READ(SDEIER); | 1779 | POSTING_READ(SDEIER); |
1778 | } | 1780 | } |
1779 | 1781 | ||
1782 | /* | ||
1783 | * Enable digital hotplug on the PCH, and configure the DP short pulse | ||
1784 | * duration to 2ms (which is the minimum in the Display Port spec) | ||
1785 | * | ||
1786 | * This register is the same on all known PCH chips. | ||
1787 | */ | ||
1788 | |||
1789 | static void ironlake_enable_pch_hotplug(struct drm_device *dev) | ||
1790 | { | ||
1791 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1792 | u32 hotplug; | ||
1793 | |||
1794 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | ||
1795 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | ||
1796 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | ||
1797 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | ||
1798 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | ||
1799 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | ||
1800 | } | ||
1801 | |||
1780 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1802 | static int ironlake_irq_postinstall(struct drm_device *dev) |
1781 | { | 1803 | { |
1782 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1804 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -1839,6 +1861,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1839 | I915_WRITE(SDEIER, hotplug_mask); | 1861 | I915_WRITE(SDEIER, hotplug_mask); |
1840 | POSTING_READ(SDEIER); | 1862 | POSTING_READ(SDEIER); |
1841 | 1863 | ||
1864 | ironlake_enable_pch_hotplug(dev); | ||
1865 | |||
1842 | if (IS_IRONLAKE_M(dev)) { | 1866 | if (IS_IRONLAKE_M(dev)) { |
1843 | /* Clear & enable PCU event interrupts */ | 1867 | /* Clear & enable PCU event interrupts */ |
1844 | I915_WRITE(DEIIR, DE_PCU_EVENT); | 1868 | I915_WRITE(DEIIR, DE_PCU_EVENT); |
@@ -1896,6 +1920,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) | |||
1896 | I915_WRITE(SDEIER, hotplug_mask); | 1920 | I915_WRITE(SDEIER, hotplug_mask); |
1897 | POSTING_READ(SDEIER); | 1921 | POSTING_READ(SDEIER); |
1898 | 1922 | ||
1923 | ironlake_enable_pch_hotplug(dev); | ||
1924 | |||
1899 | return 0; | 1925 | return 0; |
1900 | } | 1926 | } |
1901 | 1927 | ||
@@ -2020,6 +2046,10 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
2020 | I915_WRITE(GTIMR, 0xffffffff); | 2046 | I915_WRITE(GTIMR, 0xffffffff); |
2021 | I915_WRITE(GTIER, 0x0); | 2047 | I915_WRITE(GTIER, 0x0); |
2022 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2048 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
2049 | |||
2050 | I915_WRITE(SDEIMR, 0xffffffff); | ||
2051 | I915_WRITE(SDEIER, 0x0); | ||
2052 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | ||
2023 | } | 2053 | } |
2024 | 2054 | ||
2025 | static void i915_driver_irq_uninstall(struct drm_device * dev) | 2055 | static void i915_driver_irq_uninstall(struct drm_device * dev) |
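The PMIMR reshuffle above closes a race: previously the IRQ handler queued work before the mask write landed, so a second PM interrupt could arrive while pm_iir was being consumed. After the change every PMIMR write happens under rps_lock (mask in the interrupt handler, unmask in the worker), and POSTING_READ flushes the mask before the lock drops. Condensed from the two hunks above, with the locals pulled into sketch functions:

	static void sketch_pm_irq_handler(drm_i915_private_t *dev_priv, u32 pm_iir)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->rps_lock, flags);
		dev_priv->pm_iir |= pm_iir;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);	/* mask pending bits */
		POSTING_READ(GEN6_PMIMR);			/* flush before unlock */
		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
		queue_work(dev_priv->wq, &dev_priv->rps_work);
	}

	static u32 sketch_pm_work_consume(drm_i915_private_t *dev_priv)
	{
		u32 pm_iir;

		spin_lock_irq(&dev_priv->rps_lock);
		pm_iir = dev_priv->pm_iir;
		dev_priv->pm_iir = 0;
		I915_WRITE(GEN6_PMIMR, 0);	/* unmask: everything consumed */
		spin_unlock_irq(&dev_priv->rps_lock);

		return pm_iir;
	}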
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c index 83b7b81bb2b8..cc8f6d49cf20 100644 --- a/drivers/gpu/drm/i915/i915_mem.c +++ b/drivers/gpu/drm/i915/i915_mem.c | |||
@@ -202,7 +202,7 @@ static int init_heap(struct mem_block **heap, int start, int size) | |||
202 | blocks->next = blocks->prev = *heap; | 202 | blocks->next = blocks->prev = *heap; |
203 | 203 | ||
204 | memset(*heap, 0, sizeof(**heap)); | 204 | memset(*heap, 0, sizeof(**heap)); |
205 | (*heap)->file_priv = (struct drm_file *) - 1; | 205 | (*heap)->file_priv = (struct drm_file *) -1; |
206 | (*heap)->next = (*heap)->prev = blocks; | 206 | (*heap)->next = (*heap)->prev = blocks; |
207 | return 0; | 207 | return 0; |
208 | } | 208 | } |
@@ -359,19 +359,19 @@ int i915_mem_init_heap(struct drm_device *dev, void *data, | |||
359 | return init_heap(heap, initheap->start, initheap->size); | 359 | return init_heap(heap, initheap->start, initheap->size); |
360 | } | 360 | } |
361 | 361 | ||
362 | int i915_mem_destroy_heap( struct drm_device *dev, void *data, | 362 | int i915_mem_destroy_heap(struct drm_device *dev, void *data, |
363 | struct drm_file *file_priv ) | 363 | struct drm_file *file_priv) |
364 | { | 364 | { |
365 | drm_i915_private_t *dev_priv = dev->dev_private; | 365 | drm_i915_private_t *dev_priv = dev->dev_private; |
366 | drm_i915_mem_destroy_heap_t *destroyheap = data; | 366 | drm_i915_mem_destroy_heap_t *destroyheap = data; |
367 | struct mem_block **heap; | 367 | struct mem_block **heap; |
368 | 368 | ||
369 | if ( !dev_priv ) { | 369 | if (!dev_priv) { |
370 | DRM_ERROR( "called with no initialization\n" ); | 370 | DRM_ERROR("called with no initialization\n"); |
371 | return -EINVAL; | 371 | return -EINVAL; |
372 | } | 372 | } |
373 | 373 | ||
374 | heap = get_heap( dev_priv, destroyheap->region ); | 374 | heap = get_heap(dev_priv, destroyheap->region); |
375 | if (!heap) { | 375 | if (!heap) { |
376 | DRM_ERROR("get_heap failed"); | 376 | DRM_ERROR("get_heap failed"); |
377 | return -EFAULT; | 377 | return -EFAULT; |
@@ -382,6 +382,6 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data, | |||
382 | return -EFAULT; | 382 | return -EFAULT; |
383 | } | 383 | } |
384 | 384 | ||
385 | i915_mem_takedown( heap ); | 385 | i915_mem_takedown(heap); |
386 | return 0; | 386 | return 0; |
387 | } | 387 | } |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 542453f7498c..5a09416e611f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -156,7 +156,7 @@ | |||
156 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | 156 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) |
157 | #define MI_SUSPEND_FLUSH_EN (1<<0) | 157 | #define MI_SUSPEND_FLUSH_EN (1<<0) |
158 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 158 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
159 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) | 159 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) |
160 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 160 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
161 | #define MI_OVERLAY_ON (0x1<<21) | 161 | #define MI_OVERLAY_ON (0x1<<21) |
162 | #define MI_OVERLAY_OFF (0x2<<21) | 162 | #define MI_OVERLAY_OFF (0x2<<21) |
@@ -194,6 +194,13 @@ | |||
194 | #define MI_SEMAPHORE_UPDATE (1<<21) | 194 | #define MI_SEMAPHORE_UPDATE (1<<21) |
195 | #define MI_SEMAPHORE_COMPARE (1<<20) | 195 | #define MI_SEMAPHORE_COMPARE (1<<20) |
196 | #define MI_SEMAPHORE_REGISTER (1<<18) | 196 | #define MI_SEMAPHORE_REGISTER (1<<18) |
197 | #define MI_SEMAPHORE_SYNC_RV (2<<16) | ||
198 | #define MI_SEMAPHORE_SYNC_RB (0<<16) | ||
199 | #define MI_SEMAPHORE_SYNC_VR (0<<16) | ||
200 | #define MI_SEMAPHORE_SYNC_VB (2<<16) | ||
201 | #define MI_SEMAPHORE_SYNC_BR (2<<16) | ||
202 | #define MI_SEMAPHORE_SYNC_BV (0<<16) | ||
203 | #define MI_SEMAPHORE_SYNC_INVALID (1<<0) | ||
197 | /* | 204 | /* |
198 | * 3D instructions used by the kernel | 205 | * 3D instructions used by the kernel |
199 | */ | 206 | */ |
@@ -235,16 +242,22 @@ | |||
235 | #define ASYNC_FLIP (1<<22) | 242 | #define ASYNC_FLIP (1<<22) |
236 | #define DISPLAY_PLANE_A (0<<20) | 243 | #define DISPLAY_PLANE_A (0<<20) |
237 | #define DISPLAY_PLANE_B (1<<20) | 244 | #define DISPLAY_PLANE_B (1<<20) |
238 | #define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) | 245 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) |
239 | #define PIPE_CONTROL_QW_WRITE (1<<14) | 246 | #define PIPE_CONTROL_CS_STALL (1<<20) |
240 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) | 247 | #define PIPE_CONTROL_QW_WRITE (1<<14) |
241 | #define PIPE_CONTROL_WC_FLUSH (1<<12) | 248 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) |
242 | #define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ | 249 | #define PIPE_CONTROL_WRITE_FLUSH (1<<12) |
243 | #define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ | 250 | #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ |
244 | #define PIPE_CONTROL_ISP_DIS (1<<9) | 251 | #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ |
245 | #define PIPE_CONTROL_NOTIFY (1<<8) | 252 | #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ |
253 | #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) | ||
254 | #define PIPE_CONTROL_NOTIFY (1<<8) | ||
255 | #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) | ||
256 | #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) | ||
257 | #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) | ||
258 | #define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) | ||
259 | #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) | ||
246 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | 260 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ |
247 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ | ||
248 | 261 | ||
249 | 262 | ||
250 | /* | 263 | /* |
@@ -296,6 +309,12 @@ | |||
296 | #define RING_CTL(base) ((base)+0x3c) | 309 | #define RING_CTL(base) ((base)+0x3c) |
297 | #define RING_SYNC_0(base) ((base)+0x40) | 310 | #define RING_SYNC_0(base) ((base)+0x40) |
298 | #define RING_SYNC_1(base) ((base)+0x44) | 311 | #define RING_SYNC_1(base) ((base)+0x44) |
312 | #define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) | ||
313 | #define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) | ||
314 | #define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) | ||
315 | #define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) | ||
316 | #define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) | ||
317 | #define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) | ||
299 | #define RING_MAX_IDLE(base) ((base)+0x54) | 318 | #define RING_MAX_IDLE(base) ((base)+0x54) |
300 | #define RING_HWS_PGA(base) ((base)+0x80) | 319 | #define RING_HWS_PGA(base) ((base)+0x80) |
301 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | 320 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) |
@@ -470,7 +489,7 @@ | |||
470 | 489 | ||
471 | /* Enables non-sequential data reads through arbiter | 490 | /* Enables non-sequential data reads through arbiter |
472 | */ | 491 | */ |
473 | #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) | 492 | #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) |
474 | 493 | ||
475 | /* Disable FSB snooping of cacheable write cycles from binner/render | 494 | /* Disable FSB snooping of cacheable write cycles from binner/render |
476 | * command stream | 495 | * command stream |
@@ -626,7 +645,7 @@ | |||
626 | 645 | ||
627 | #define ILK_DISPLAY_CHICKEN1 0x42000 | 646 | #define ILK_DISPLAY_CHICKEN1 0x42000 |
628 | #define ILK_FBCQ_DIS (1<<22) | 647 | #define ILK_FBCQ_DIS (1<<22) |
629 | #define ILK_PABSTRETCH_DIS (1<<21) | 648 | #define ILK_PABSTRETCH_DIS (1<<21) |
630 | 649 | ||
631 | 650 | ||
632 | /* | 651 | /* |
@@ -2358,7 +2377,7 @@ | |||
2358 | 2377 | ||
2359 | #define DSPFW1 0x70034 | 2378 | #define DSPFW1 0x70034 |
2360 | #define DSPFW_SR_SHIFT 23 | 2379 | #define DSPFW_SR_SHIFT 23 |
2361 | #define DSPFW_SR_MASK (0x1ff<<23) | 2380 | #define DSPFW_SR_MASK (0x1ff<<23) |
2362 | #define DSPFW_CURSORB_SHIFT 16 | 2381 | #define DSPFW_CURSORB_SHIFT 16 |
2363 | #define DSPFW_CURSORB_MASK (0x3f<<16) | 2382 | #define DSPFW_CURSORB_MASK (0x3f<<16) |
2364 | #define DSPFW_PLANEB_SHIFT 8 | 2383 | #define DSPFW_PLANEB_SHIFT 8 |
@@ -2416,6 +2435,7 @@ | |||
2416 | #define WM0_PIPE_CURSOR_MASK (0x1f) | 2435 | #define WM0_PIPE_CURSOR_MASK (0x1f) |
2417 | 2436 | ||
2418 | #define WM0_PIPEB_ILK 0x45104 | 2437 | #define WM0_PIPEB_ILK 0x45104 |
2438 | #define WM0_PIPEC_IVB 0x45200 | ||
2419 | #define WM1_LP_ILK 0x45108 | 2439 | #define WM1_LP_ILK 0x45108 |
2420 | #define WM1_LP_SR_EN (1<<31) | 2440 | #define WM1_LP_SR_EN (1<<31) |
2421 | #define WM1_LP_LATENCY_SHIFT 24 | 2441 | #define WM1_LP_LATENCY_SHIFT 24 |
@@ -2554,10 +2574,18 @@ | |||
2554 | #define _CURBBASE 0x700c4 | 2574 | #define _CURBBASE 0x700c4 |
2555 | #define _CURBPOS 0x700c8 | 2575 | #define _CURBPOS 0x700c8 |
2556 | 2576 | ||
2577 | #define _CURBCNTR_IVB 0x71080 | ||
2578 | #define _CURBBASE_IVB 0x71084 | ||
2579 | #define _CURBPOS_IVB 0x71088 | ||
2580 | |||
2557 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) | 2581 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) |
2558 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) | 2582 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) |
2559 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) | 2583 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) |
2560 | 2584 | ||
2585 | #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) | ||
2586 | #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) | ||
2587 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) | ||
2588 | |||
2561 | /* Display A control */ | 2589 | /* Display A control */ |
2562 | #define _DSPACNTR 0x70180 | 2590 | #define _DSPACNTR 0x70180 |
2563 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2591 | #define DISPLAY_PLANE_ENABLE (1<<31) |
@@ -2903,12 +2931,13 @@ | |||
2903 | #define SDEIER 0xc400c | 2931 | #define SDEIER 0xc400c |
2904 | 2932 | ||
2905 | /* digital port hotplug */ | 2933 | /* digital port hotplug */ |
2906 | #define PCH_PORT_HOTPLUG 0xc4030 | 2934 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ |
2907 | #define PORTD_HOTPLUG_ENABLE (1 << 20) | 2935 | #define PORTD_HOTPLUG_ENABLE (1 << 20) |
2908 | #define PORTD_PULSE_DURATION_2ms (0) | 2936 | #define PORTD_PULSE_DURATION_2ms (0) |
2909 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) | 2937 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) |
2910 | #define PORTD_PULSE_DURATION_6ms (2 << 18) | 2938 | #define PORTD_PULSE_DURATION_6ms (2 << 18) |
2911 | #define PORTD_PULSE_DURATION_100ms (3 << 18) | 2939 | #define PORTD_PULSE_DURATION_100ms (3 << 18) |
2940 | #define PORTD_PULSE_DURATION_MASK (3 << 18) | ||
2912 | #define PORTD_HOTPLUG_NO_DETECT (0) | 2941 | #define PORTD_HOTPLUG_NO_DETECT (0) |
2913 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) | 2942 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) |
2914 | #define PORTD_HOTPLUG_LONG_DETECT (1 << 17) | 2943 | #define PORTD_HOTPLUG_LONG_DETECT (1 << 17) |
@@ -2917,6 +2946,7 @@ | |||
2917 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) | 2946 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) |
2918 | #define PORTC_PULSE_DURATION_6ms (2 << 10) | 2947 | #define PORTC_PULSE_DURATION_6ms (2 << 10) |
2919 | #define PORTC_PULSE_DURATION_100ms (3 << 10) | 2948 | #define PORTC_PULSE_DURATION_100ms (3 << 10) |
2949 | #define PORTC_PULSE_DURATION_MASK (3 << 10) | ||
2920 | #define PORTC_HOTPLUG_NO_DETECT (0) | 2950 | #define PORTC_HOTPLUG_NO_DETECT (0) |
2921 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) | 2951 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) |
2922 | #define PORTC_HOTPLUG_LONG_DETECT (1 << 9) | 2952 | #define PORTC_HOTPLUG_LONG_DETECT (1 << 9) |
@@ -2925,6 +2955,7 @@ | |||
2925 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) | 2955 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) |
2926 | #define PORTB_PULSE_DURATION_6ms (2 << 2) | 2956 | #define PORTB_PULSE_DURATION_6ms (2 << 2) |
2927 | #define PORTB_PULSE_DURATION_100ms (3 << 2) | 2957 | #define PORTB_PULSE_DURATION_100ms (3 << 2) |
2958 | #define PORTB_PULSE_DURATION_MASK (3 << 2) | ||
2928 | #define PORTB_HOTPLUG_NO_DETECT (0) | 2959 | #define PORTB_HOTPLUG_NO_DETECT (0) |
2929 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) | 2960 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) |
2930 | #define PORTB_HOTPLUG_LONG_DETECT (1 << 1) | 2961 | #define PORTB_HOTPLUG_LONG_DETECT (1 << 1) |
@@ -2945,15 +2976,15 @@ | |||
2945 | 2976 | ||
2946 | #define _PCH_DPLL_A 0xc6014 | 2977 | #define _PCH_DPLL_A 0xc6014 |
2947 | #define _PCH_DPLL_B 0xc6018 | 2978 | #define _PCH_DPLL_B 0xc6018 |
2948 | #define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B) | 2979 | #define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) |
2949 | 2980 | ||
2950 | #define _PCH_FPA0 0xc6040 | 2981 | #define _PCH_FPA0 0xc6040 |
2951 | #define FP_CB_TUNE (0x3<<22) | 2982 | #define FP_CB_TUNE (0x3<<22) |
2952 | #define _PCH_FPA1 0xc6044 | 2983 | #define _PCH_FPA1 0xc6044 |
2953 | #define _PCH_FPB0 0xc6048 | 2984 | #define _PCH_FPB0 0xc6048 |
2954 | #define _PCH_FPB1 0xc604c | 2985 | #define _PCH_FPB1 0xc604c |
2955 | #define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0) | 2986 | #define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0) |
2956 | #define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1) | 2987 | #define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1) |
2957 | 2988 | ||
2958 | #define PCH_DPLL_TEST 0xc606c | 2989 | #define PCH_DPLL_TEST 0xc606c |
2959 | 2990 | ||
@@ -3167,6 +3198,7 @@ | |||
3167 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) | 3198 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) |
3168 | 3199 | ||
3169 | /* both Tx and Rx */ | 3200 | /* both Tx and Rx */ |
3201 | #define FDI_COMPOSITE_SYNC (1<<11) | ||
3170 | #define FDI_LINK_TRAIN_AUTO (1<<10) | 3202 | #define FDI_LINK_TRAIN_AUTO (1<<10) |
3171 | #define FDI_SCRAMBLING_ENABLE (0<<7) | 3203 | #define FDI_SCRAMBLING_ENABLE (0<<7) |
3172 | #define FDI_SCRAMBLING_DISABLE (1<<7) | 3204 | #define FDI_SCRAMBLING_DISABLE (1<<7) |
@@ -3308,15 +3340,35 @@ | |||
3308 | #define PCH_PP_STATUS 0xc7200 | 3340 | #define PCH_PP_STATUS 0xc7200 |
3309 | #define PCH_PP_CONTROL 0xc7204 | 3341 | #define PCH_PP_CONTROL 0xc7204 |
3310 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | 3342 | #define PANEL_UNLOCK_REGS (0xabcd << 16) |
3343 | #define PANEL_UNLOCK_MASK (0xffff << 16) | ||
3311 | #define EDP_FORCE_VDD (1 << 3) | 3344 | #define EDP_FORCE_VDD (1 << 3) |
3312 | #define EDP_BLC_ENABLE (1 << 2) | 3345 | #define EDP_BLC_ENABLE (1 << 2) |
3313 | #define PANEL_POWER_RESET (1 << 1) | 3346 | #define PANEL_POWER_RESET (1 << 1) |
3314 | #define PANEL_POWER_OFF (0 << 0) | 3347 | #define PANEL_POWER_OFF (0 << 0) |
3315 | #define PANEL_POWER_ON (1 << 0) | 3348 | #define PANEL_POWER_ON (1 << 0) |
3316 | #define PCH_PP_ON_DELAYS 0xc7208 | 3349 | #define PCH_PP_ON_DELAYS 0xc7208 |
3350 | #define PANEL_PORT_SELECT_MASK (3 << 30) | ||
3351 | #define PANEL_PORT_SELECT_LVDS (0 << 30) | ||
3352 | #define PANEL_PORT_SELECT_DPA (1 << 30) | ||
3317 | #define EDP_PANEL (1 << 30) | 3353 | #define EDP_PANEL (1 << 30) |
3354 | #define PANEL_PORT_SELECT_DPC (2 << 30) | ||
3355 | #define PANEL_PORT_SELECT_DPD (3 << 30) | ||
3356 | #define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) | ||
3357 | #define PANEL_POWER_UP_DELAY_SHIFT 16 | ||
3358 | #define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) | ||
3359 | #define PANEL_LIGHT_ON_DELAY_SHIFT 0 | ||
3360 | |||
3318 | #define PCH_PP_OFF_DELAYS 0xc720c | 3361 | #define PCH_PP_OFF_DELAYS 0xc720c |
3362 | #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) | ||
3363 | #define PANEL_POWER_DOWN_DELAY_SHIFT 16 | ||
3364 | #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) | ||
3365 | #define PANEL_LIGHT_OFF_DELAY_SHIFT 0 | ||
3366 | |||
3319 | #define PCH_PP_DIVISOR 0xc7210 | 3367 | #define PCH_PP_DIVISOR 0xc7210 |
3368 | #define PP_REFERENCE_DIVIDER_MASK (0xffffff00) | ||
3369 | #define PP_REFERENCE_DIVIDER_SHIFT 8 | ||
3370 | #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) | ||
3371 | #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 | ||
3320 | 3372 | ||
3321 | #define PCH_DP_B 0xe4100 | 3373 | #define PCH_DP_B 0xe4100 |
3322 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | 3374 | #define PCH_DPB_AUX_CH_CTL 0xe4110 |
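The new PANEL_* masks above split each panel-power delay register into two 13-bit fields. A hedged decode helper; the 100 µs unit is an assumption taken from the eDP power-sequencing convention, not stated in this header:

	static void sketch_decode_pp_on(u32 val, u32 *power_up_100us,
					u32 *light_on_100us)
	{
		*power_up_100us = (val & PANEL_POWER_UP_DELAY_MASK) >>
				  PANEL_POWER_UP_DELAY_SHIFT;
		*light_on_100us = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
				  PANEL_LIGHT_ON_DELAY_SHIFT;
	}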
@@ -3470,4 +3522,29 @@ | |||
3470 | #define GEN6_PCODE_DATA 0x138128 | 3522 | #define GEN6_PCODE_DATA 0x138128 |
3471 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 | 3523 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
3472 | 3524 | ||
3525 | #define G4X_AUD_VID_DID 0x62020 | ||
3526 | #define INTEL_AUDIO_DEVCL 0x808629FB | ||
3527 | #define INTEL_AUDIO_DEVBLC 0x80862801 | ||
3528 | #define INTEL_AUDIO_DEVCTG 0x80862802 | ||
3529 | |||
3530 | #define G4X_AUD_CNTL_ST 0x620B4 | ||
3531 | #define G4X_ELDV_DEVCL_DEVBLC (1 << 13) | ||
3532 | #define G4X_ELDV_DEVCTG (1 << 14) | ||
3533 | #define G4X_ELD_ADDR (0xf << 5) | ||
3534 | #define G4X_ELD_ACK (1 << 4) | ||
3535 | #define G4X_HDMIW_HDMIEDID 0x6210C | ||
3536 | |||
3537 | #define GEN5_HDMIW_HDMIEDID_A 0xE2050 | ||
3538 | #define GEN5_AUD_CNTL_ST_A 0xE20B4 | ||
3539 | #define GEN5_ELD_BUFFER_SIZE (0x1f << 10) | ||
3540 | #define GEN5_ELD_ADDRESS (0x1f << 5) | ||
3541 | #define GEN5_ELD_ACK (1 << 4) | ||
3542 | #define GEN5_AUD_CNTL_ST2 0xE20C0 | ||
3543 | #define GEN5_ELD_VALIDB (1 << 0) | ||
3544 | #define GEN5_CP_READYB (1 << 1) | ||
3545 | |||
3546 | #define GEN7_HDMIW_HDMIEDID_A 0xE5050 | ||
3547 | #define GEN7_AUD_CNTRL_ST_A 0xE50B4 | ||
3548 | #define GEN7_AUD_CNTRL_ST2 0xE50C0 | ||
3549 | |||
3473 | #endif /* _I915_REG_H_ */ | 3550 | #endif /* _I915_REG_H_ */ |
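GFX_OP_PIPE_CONTROL is now parameterized by packet length, and the flag names were renamed to match the hardware documentation. A sketch of emitting a 4-dword PIPE_CONTROL with the new macro; intel_ring_emit(), ring, and scratch_addr are assumed from the ring-buffer code, not defined in this header:

	/* flush render-target writes and post a qword write to the scratch page */
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_WRITE_FLUSH |
			      PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			      PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);

The macro encodes (len-2) in the DWord Length field, so GFX_OP_PIPE_CONTROL(4) describes exactly the four dwords emitted here.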
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f10742359ec9..f8f602d76650 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -60,7 +60,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
60 | else | 60 | else |
61 | array = dev_priv->save_palette_b; | 61 | array = dev_priv->save_palette_b; |
62 | 62 | ||
63 | for(i = 0; i < 256; i++) | 63 | for (i = 0; i < 256; i++) |
64 | array[i] = I915_READ(reg + (i << 2)); | 64 | array[i] = I915_READ(reg + (i << 2)); |
65 | } | 65 | } |
66 | 66 | ||
@@ -82,7 +82,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | |||
82 | else | 82 | else |
83 | array = dev_priv->save_palette_b; | 83 | array = dev_priv->save_palette_b; |
84 | 84 | ||
85 | for(i = 0; i < 256; i++) | 85 | for (i = 0; i < 256; i++) |
86 | I915_WRITE(reg + (i << 2), array[i]); | 86 | I915_WRITE(reg + (i << 2), array[i]); |
87 | } | 87 | } |
88 | 88 | ||
@@ -887,10 +887,10 @@ int i915_restore_state(struct drm_device *dev) | |||
887 | mutex_lock(&dev->struct_mutex); | 887 | mutex_lock(&dev->struct_mutex); |
888 | 888 | ||
889 | /* Cache mode state */ | 889 | /* Cache mode state */ |
890 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 890 | I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); |
891 | 891 | ||
892 | /* Memory arbitration state */ | 892 | /* Memory arbitration state */ |
893 | I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); | 893 | I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); |
894 | 894 | ||
895 | for (i = 0; i < 16; i++) { | 895 | for (i = 0; i < 16; i++) { |
896 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); | 896 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index d623fefbfaca..dac7bba4d9da 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -385,29 +385,29 @@ TRACE_EVENT(i915_flip_complete, | |||
385 | ); | 385 | ); |
386 | 386 | ||
387 | TRACE_EVENT(i915_reg_rw, | 387 | TRACE_EVENT(i915_reg_rw, |
388 | TP_PROTO(bool write, u32 reg, u64 val, int len), | 388 | TP_PROTO(bool write, u32 reg, u64 val, int len), |
389 | 389 | ||
390 | TP_ARGS(write, reg, val, len), | 390 | TP_ARGS(write, reg, val, len), |
391 | 391 | ||
392 | TP_STRUCT__entry( | 392 | TP_STRUCT__entry( |
393 | __field(u64, val) | 393 | __field(u64, val) |
394 | __field(u32, reg) | 394 | __field(u32, reg) |
395 | __field(u16, write) | 395 | __field(u16, write) |
396 | __field(u16, len) | 396 | __field(u16, len) |
397 | ), | 397 | ), |
398 | 398 | ||
399 | TP_fast_assign( | 399 | TP_fast_assign( |
400 | __entry->val = (u64)val; | 400 | __entry->val = (u64)val; |
401 | __entry->reg = reg; | 401 | __entry->reg = reg; |
402 | __entry->write = write; | 402 | __entry->write = write; |
403 | __entry->len = len; | 403 | __entry->len = len; |
404 | ), | 404 | ), |
405 | 405 | ||
406 | TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", | 406 | TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", |
407 | __entry->write ? "write" : "read", | 407 | __entry->write ? "write" : "read", |
408 | __entry->reg, __entry->len, | 408 | __entry->reg, __entry->len, |
409 | (u32)(__entry->val & 0xffffffff), | 409 | (u32)(__entry->val & 0xffffffff), |
410 | (u32)(__entry->val >> 32)) | 410 | (u32)(__entry->val >> 32)) |
411 | ); | 411 | ); |
412 | 412 | ||
413 | #endif /* _I915_TRACE_H_ */ | 413 | #endif /* _I915_TRACE_H_ */ |
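TRACE_EVENT(i915_reg_rw, ...) above generates a trace_i915_reg_rw() helper with the TP_PROTO signature shown. A sketch of how a register-write wrapper would call it; treating dev_priv->regs as the MMIO base is an assumption, not a quote of the driver:

	static inline void sketch_write32(struct drm_i915_private *dev_priv,
					  u32 reg, u32 val)
	{
		trace_i915_reg_rw(true, reg, (u64)val, sizeof(val));
		writel(val, dev_priv->regs + reg);
	}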
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 2cb8e0b9f1ee..cb912106d1a2 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -64,7 +64,7 @@ static int intel_dsm(acpi_handle handle, int func, int arg) | |||
64 | 64 | ||
65 | case ACPI_TYPE_BUFFER: | 65 | case ACPI_TYPE_BUFFER: |
66 | if (obj->buffer.length == 4) { | 66 | if (obj->buffer.length == 4) { |
67 | result =(obj->buffer.pointer[0] | | 67 | result = (obj->buffer.pointer[0] | |
68 | (obj->buffer.pointer[1] << 8) | | 68 | (obj->buffer.pointer[1] << 8) | |
69 | (obj->buffer.pointer[2] << 16) | | 69 | (obj->buffer.pointer[2] << 16) | |
70 | (obj->buffer.pointer[3] << 24)); | 70 | (obj->buffer.pointer[3] << 24)); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 61abef8a8119..63880e2e5cfd 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright © 2006 Intel Corporation | 2 | * Copyright © 2006 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -309,6 +309,13 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
309 | dev_priv->lvds_use_ssc = general->enable_ssc; | 309 | dev_priv->lvds_use_ssc = general->enable_ssc; |
310 | dev_priv->lvds_ssc_freq = | 310 | dev_priv->lvds_ssc_freq = |
311 | intel_bios_ssc_frequency(dev, general->ssc_freq); | 311 | intel_bios_ssc_frequency(dev, general->ssc_freq); |
312 | dev_priv->display_clock_mode = general->display_clock_mode; | ||
313 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", | ||
314 | dev_priv->int_tv_support, | ||
315 | dev_priv->int_crt_support, | ||
316 | dev_priv->lvds_use_ssc, | ||
317 | dev_priv->lvds_ssc_freq, | ||
318 | dev_priv->display_clock_mode); | ||
312 | } | 319 | } |
313 | } | 320 | } |
314 | 321 | ||
@@ -381,7 +388,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
381 | if (p_child->dvo_port != DEVICE_PORT_DVOB && | 388 | if (p_child->dvo_port != DEVICE_PORT_DVOB && |
382 | p_child->dvo_port != DEVICE_PORT_DVOC) { | 389 | p_child->dvo_port != DEVICE_PORT_DVOC) { |
383 | /* skip the incorrect SDVO port */ | 390 | /* skip the incorrect SDVO port */ |
384 | DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n"); | 391 | DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); |
385 | continue; | 392 | continue; |
386 | } | 393 | } |
387 | DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" | 394 | DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" |
@@ -396,15 +403,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
396 | p_mapping->dvo_wiring = p_child->dvo_wiring; | 403 | p_mapping->dvo_wiring = p_child->dvo_wiring; |
397 | p_mapping->ddc_pin = p_child->ddc_pin; | 404 | p_mapping->ddc_pin = p_child->ddc_pin; |
398 | p_mapping->i2c_pin = p_child->i2c_pin; | 405 | p_mapping->i2c_pin = p_child->i2c_pin; |
399 | p_mapping->i2c_speed = p_child->i2c_speed; | ||
400 | p_mapping->initialized = 1; | 406 | p_mapping->initialized = 1; |
401 | DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n", | 407 | DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", |
402 | p_mapping->dvo_port, | 408 | p_mapping->dvo_port, |
403 | p_mapping->slave_addr, | 409 | p_mapping->slave_addr, |
404 | p_mapping->dvo_wiring, | 410 | p_mapping->dvo_wiring, |
405 | p_mapping->ddc_pin, | 411 | p_mapping->ddc_pin, |
406 | p_mapping->i2c_pin, | 412 | p_mapping->i2c_pin); |
407 | p_mapping->i2c_speed); | ||
408 | } else { | 413 | } else { |
409 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " | 414 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " |
410 | "two SDVO device.\n"); | 415 | "two SDVO device.\n"); |
@@ -564,7 +569,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
564 | count++; | 569 | count++; |
565 | } | 570 | } |
566 | if (!count) { | 571 | if (!count) { |
567 | DRM_DEBUG_KMS("no child dev is parsed from VBT \n"); | 572 | DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); |
568 | return; | 573 | return; |
569 | } | 574 | } |
570 | dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL); | 575 | dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL); |
@@ -610,7 +615,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
610 | /* Default to using SSC */ | 615 | /* Default to using SSC */ |
611 | dev_priv->lvds_use_ssc = 1; | 616 | dev_priv->lvds_use_ssc = 1; |
612 | dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); | 617 | dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); |
613 | DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); | 618 | DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); |
614 | 619 | ||
615 | /* eDP data */ | 620 | /* eDP data */ |
616 | dev_priv->edp.bpp = 18; | 621 | dev_priv->edp.bpp = 18; |
@@ -639,7 +644,7 @@ intel_parse_bios(struct drm_device *dev) | |||
639 | if (dev_priv->opregion.vbt) { | 644 | if (dev_priv->opregion.vbt) { |
640 | struct vbt_header *vbt = dev_priv->opregion.vbt; | 645 | struct vbt_header *vbt = dev_priv->opregion.vbt; |
641 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { | 646 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { |
642 | DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", | 647 | DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", |
643 | vbt->signature); | 648 | vbt->signature); |
644 | bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); | 649 | bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); |
645 | } else | 650 | } else |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 5f8e4edcbbb9..8af3735e27c6 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright © 2006 Intel Corporation | 2 | * Copyright © 2006 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -120,7 +120,9 @@ struct bdb_general_features { | |||
120 | u8 ssc_freq:1; | 120 | u8 ssc_freq:1; |
121 | u8 enable_lfp_on_override:1; | 121 | u8 enable_lfp_on_override:1; |
122 | u8 disable_ssc_ddt:1; | 122 | u8 disable_ssc_ddt:1; |
123 | u8 rsvd8:3; /* finish byte */ | 123 | u8 rsvd7:1; |
124 | u8 display_clock_mode:1; | ||
125 | u8 rsvd8:1; /* finish byte */ | ||
124 | 126 | ||
125 | /* bits 3 */ | 127 | /* bits 3 */ |
126 | u8 disable_smooth_vision:1; | 128 | u8 disable_smooth_vision:1; |
@@ -133,7 +135,10 @@ struct bdb_general_features { | |||
133 | /* bits 5 */ | 135 | /* bits 5 */ |
134 | u8 int_crt_support:1; | 136 | u8 int_crt_support:1; |
135 | u8 int_tv_support:1; | 137 | u8 int_tv_support:1; |
136 | u8 rsvd11:6; /* finish byte */ | 138 | u8 int_efp_support:1; |
139 | u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ | ||
140 | u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ | ||
141 | u8 rsvd11:3; /* finish byte */ | ||
137 | } __attribute__((packed)); | 142 | } __attribute__((packed)); |
138 | 143 | ||
139 | /* pre-915 */ | 144 | /* pre-915 */ |
@@ -197,8 +202,7 @@ struct bdb_general_features { | |||
197 | struct child_device_config { | 202 | struct child_device_config { |
198 | u16 handle; | 203 | u16 handle; |
199 | u16 device_type; | 204 | u16 device_type; |
200 | u8 i2c_speed; | 205 | u8 device_id[10]; /* ascii string */ |
201 | u8 rsvd[9]; | ||
202 | u16 addin_offset; | 206 | u16 addin_offset; |
203 | u8 dvo_port; /* See Device_PORT_* above */ | 207 | u8 dvo_port; /* See Device_PORT_* above */ |
204 | u8 i2c_pin; | 208 | u8 i2c_pin; |
@@ -240,7 +244,7 @@ struct bdb_general_definitions { | |||
240 | * And the device num is related with the size of general definition | 244 | * And the device num is related with the size of general definition |
241 | * block. It is obtained by using the following formula: | 245 | * block. It is obtained by using the following formula: |
242 | * number = (block_size - sizeof(bdb_general_definitions))/ | 246 | * number = (block_size - sizeof(bdb_general_definitions))/ |
243 | * sizeof(child_device_config); | 247 | * sizeof(child_device_config); |
244 | */ | 248 | */ |
245 | struct child_device_config devices[0]; | 249 | struct child_device_config devices[0]; |
246 | } __attribute__((packed)); | 250 | } __attribute__((packed)); |
@@ -446,11 +450,11 @@ struct bdb_driver_features { | |||
446 | #define EDP_VSWING_1_2V 3 | 450 | #define EDP_VSWING_1_2V 3 |
447 | 451 | ||
448 | struct edp_power_seq { | 452 | struct edp_power_seq { |
449 | u16 t3; | 453 | u16 t1_t3; |
450 | u16 t7; | 454 | u16 t8; |
451 | u16 t9; | 455 | u16 t9; |
452 | u16 t10; | 456 | u16 t10; |
453 | u16 t12; | 457 | u16 t11_t12; |
454 | } __attribute__ ((packed)); | 458 | } __attribute__ ((packed)); |
455 | 459 | ||
456 | struct edp_link_params { | 460 | struct edp_link_params { |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 0979d8877880..fee0ad02c6d0 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -69,7 +69,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | |||
69 | temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); | 69 | temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); |
70 | temp &= ~ADPA_DAC_ENABLE; | 70 | temp &= ~ADPA_DAC_ENABLE; |
71 | 71 | ||
72 | switch(mode) { | 72 | switch (mode) { |
73 | case DRM_MODE_DPMS_ON: | 73 | case DRM_MODE_DPMS_ON: |
74 | temp |= ADPA_DAC_ENABLE; | 74 | temp |= ADPA_DAC_ENABLE; |
75 | break; | 75 | break; |
@@ -152,17 +152,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
152 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 152 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
153 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | 153 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
154 | 154 | ||
155 | if (intel_crtc->pipe == 0) { | 155 | /* For CPT allow 3 pipe config, for others just use A or B */ |
156 | if (HAS_PCH_CPT(dev)) | 156 | if (HAS_PCH_CPT(dev)) |
157 | adpa |= PORT_TRANS_A_SEL_CPT; | 157 | adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); |
158 | else | 158 | else if (intel_crtc->pipe == 0) |
159 | adpa |= ADPA_PIPE_A_SELECT; | 159 | adpa |= ADPA_PIPE_A_SELECT; |
160 | } else { | 160 | else |
161 | if (HAS_PCH_CPT(dev)) | 161 | adpa |= ADPA_PIPE_B_SELECT; |
162 | adpa |= PORT_TRANS_B_SEL_CPT; | ||
163 | else | ||
164 | adpa |= ADPA_PIPE_B_SELECT; | ||
165 | } | ||
166 | 162 | ||
167 | if (!HAS_PCH_SPLIT(dev)) | 163 | if (!HAS_PCH_SPLIT(dev)) |
168 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); | 164 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); |
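The CRT change above collapses the per-pipe if/else into PORT_TRANS_SEL_CPT(pipe) so a third pipe works on CPT. The real macro lives in i915_reg.h; a sketch of its assumed shape, placing the pipe number in the widened transcoder-select field:

	/* illustrative only; the actual definition is in i915_reg.h */
	#define SKETCH_TRANS_SEL_SHIFT	29
	#define SKETCH_TRANS_SEL_CPT(pipe)	((pipe) << SKETCH_TRANS_SEL_SHIFT)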
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 04411ad2e779..981b1f1c04d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/vgaarb.h> | 33 | #include <linux/vgaarb.h> |
34 | #include <drm/drm_edid.h> | ||
34 | #include "drmP.h" | 35 | #include "drmP.h" |
35 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
36 | #include "i915_drm.h" | 37 | #include "i915_drm.h" |
@@ -42,39 +43,39 @@ | |||
42 | 43 | ||
43 | #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) | 44 | #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
44 | 45 | ||
45 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 46 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type); |
46 | static void intel_update_watermarks(struct drm_device *dev); | 47 | static void intel_update_watermarks(struct drm_device *dev); |
47 | static void intel_increase_pllclock(struct drm_crtc *crtc); | 48 | static void intel_increase_pllclock(struct drm_crtc *crtc); |
48 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); | 49 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
49 | 50 | ||
50 | typedef struct { | 51 | typedef struct { |
51 | /* given values */ | 52 | /* given values */ |
52 | int n; | 53 | int n; |
53 | int m1, m2; | 54 | int m1, m2; |
54 | int p1, p2; | 55 | int p1, p2; |
55 | /* derived values */ | 56 | /* derived values */ |
56 | int dot; | 57 | int dot; |
57 | int vco; | 58 | int vco; |
58 | int m; | 59 | int m; |
59 | int p; | 60 | int p; |
60 | } intel_clock_t; | 61 | } intel_clock_t; |
61 | 62 | ||
62 | typedef struct { | 63 | typedef struct { |
63 | int min, max; | 64 | int min, max; |
64 | } intel_range_t; | 65 | } intel_range_t; |
65 | 66 | ||
66 | typedef struct { | 67 | typedef struct { |
67 | int dot_limit; | 68 | int dot_limit; |
68 | int p2_slow, p2_fast; | 69 | int p2_slow, p2_fast; |
69 | } intel_p2_t; | 70 | } intel_p2_t; |
70 | 71 | ||
71 | #define INTEL_P2_NUM 2 | 72 | #define INTEL_P2_NUM 2 |
72 | typedef struct intel_limit intel_limit_t; | 73 | typedef struct intel_limit intel_limit_t; |
73 | struct intel_limit { | 74 | struct intel_limit { |
74 | intel_range_t dot, vco, n, m, m1, m2, p, p1; | 75 | intel_range_t dot, vco, n, m, m1, m2, p, p1; |
75 | intel_p2_t p2; | 76 | intel_p2_t p2; |
76 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, | 77 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, |
77 | int, int, intel_clock_t *); | 78 | int, int, intel_clock_t *); |
78 | }; | 79 | }; |
79 | 80 | ||
80 | /* FDI */ | 81 | /* FDI */ |
@@ -105,56 +106,56 @@ intel_fdi_link_freq(struct drm_device *dev) | |||
105 | } | 106 | } |
106 | 107 | ||
107 | static const intel_limit_t intel_limits_i8xx_dvo = { | 108 | static const intel_limit_t intel_limits_i8xx_dvo = { |
108 | .dot = { .min = 25000, .max = 350000 }, | 109 | .dot = { .min = 25000, .max = 350000 }, |
109 | .vco = { .min = 930000, .max = 1400000 }, | 110 | .vco = { .min = 930000, .max = 1400000 }, |
110 | .n = { .min = 3, .max = 16 }, | 111 | .n = { .min = 3, .max = 16 }, |
111 | .m = { .min = 96, .max = 140 }, | 112 | .m = { .min = 96, .max = 140 }, |
112 | .m1 = { .min = 18, .max = 26 }, | 113 | .m1 = { .min = 18, .max = 26 }, |
113 | .m2 = { .min = 6, .max = 16 }, | 114 | .m2 = { .min = 6, .max = 16 }, |
114 | .p = { .min = 4, .max = 128 }, | 115 | .p = { .min = 4, .max = 128 }, |
115 | .p1 = { .min = 2, .max = 33 }, | 116 | .p1 = { .min = 2, .max = 33 }, |
116 | .p2 = { .dot_limit = 165000, | 117 | .p2 = { .dot_limit = 165000, |
117 | .p2_slow = 4, .p2_fast = 2 }, | 118 | .p2_slow = 4, .p2_fast = 2 }, |
118 | .find_pll = intel_find_best_PLL, | 119 | .find_pll = intel_find_best_PLL, |
119 | }; | 120 | }; |
120 | 121 | ||
121 | static const intel_limit_t intel_limits_i8xx_lvds = { | 122 | static const intel_limit_t intel_limits_i8xx_lvds = { |
122 | .dot = { .min = 25000, .max = 350000 }, | 123 | .dot = { .min = 25000, .max = 350000 }, |
123 | .vco = { .min = 930000, .max = 1400000 }, | 124 | .vco = { .min = 930000, .max = 1400000 }, |
124 | .n = { .min = 3, .max = 16 }, | 125 | .n = { .min = 3, .max = 16 }, |
125 | .m = { .min = 96, .max = 140 }, | 126 | .m = { .min = 96, .max = 140 }, |
126 | .m1 = { .min = 18, .max = 26 }, | 127 | .m1 = { .min = 18, .max = 26 }, |
127 | .m2 = { .min = 6, .max = 16 }, | 128 | .m2 = { .min = 6, .max = 16 }, |
128 | .p = { .min = 4, .max = 128 }, | 129 | .p = { .min = 4, .max = 128 }, |
129 | .p1 = { .min = 1, .max = 6 }, | 130 | .p1 = { .min = 1, .max = 6 }, |
130 | .p2 = { .dot_limit = 165000, | 131 | .p2 = { .dot_limit = 165000, |
131 | .p2_slow = 14, .p2_fast = 7 }, | 132 | .p2_slow = 14, .p2_fast = 7 }, |
132 | .find_pll = intel_find_best_PLL, | 133 | .find_pll = intel_find_best_PLL, |
133 | }; | 134 | }; |
134 | 135 | ||
135 | static const intel_limit_t intel_limits_i9xx_sdvo = { | 136 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
136 | .dot = { .min = 20000, .max = 400000 }, | 137 | .dot = { .min = 20000, .max = 400000 }, |
137 | .vco = { .min = 1400000, .max = 2800000 }, | 138 | .vco = { .min = 1400000, .max = 2800000 }, |
138 | .n = { .min = 1, .max = 6 }, | 139 | .n = { .min = 1, .max = 6 }, |
139 | .m = { .min = 70, .max = 120 }, | 140 | .m = { .min = 70, .max = 120 }, |
140 | .m1 = { .min = 10, .max = 22 }, | 141 | .m1 = { .min = 10, .max = 22 }, |
141 | .m2 = { .min = 5, .max = 9 }, | 142 | .m2 = { .min = 5, .max = 9 }, |
142 | .p = { .min = 5, .max = 80 }, | 143 | .p = { .min = 5, .max = 80 }, |
143 | .p1 = { .min = 1, .max = 8 }, | 144 | .p1 = { .min = 1, .max = 8 }, |
144 | .p2 = { .dot_limit = 200000, | 145 | .p2 = { .dot_limit = 200000, |
145 | .p2_slow = 10, .p2_fast = 5 }, | 146 | .p2_slow = 10, .p2_fast = 5 }, |
146 | .find_pll = intel_find_best_PLL, | 147 | .find_pll = intel_find_best_PLL, |
147 | }; | 148 | }; |
148 | 149 | ||
149 | static const intel_limit_t intel_limits_i9xx_lvds = { | 150 | static const intel_limit_t intel_limits_i9xx_lvds = { |
150 | .dot = { .min = 20000, .max = 400000 }, | 151 | .dot = { .min = 20000, .max = 400000 }, |
151 | .vco = { .min = 1400000, .max = 2800000 }, | 152 | .vco = { .min = 1400000, .max = 2800000 }, |
152 | .n = { .min = 1, .max = 6 }, | 153 | .n = { .min = 1, .max = 6 }, |
153 | .m = { .min = 70, .max = 120 }, | 154 | .m = { .min = 70, .max = 120 }, |
154 | .m1 = { .min = 10, .max = 22 }, | 155 | .m1 = { .min = 10, .max = 22 }, |
155 | .m2 = { .min = 5, .max = 9 }, | 156 | .m2 = { .min = 5, .max = 9 }, |
156 | .p = { .min = 7, .max = 98 }, | 157 | .p = { .min = 7, .max = 98 }, |
157 | .p1 = { .min = 1, .max = 8 }, | 158 | .p1 = { .min = 1, .max = 8 }, |
158 | .p2 = { .dot_limit = 112000, | 159 | .p2 = { .dot_limit = 112000, |
159 | .p2_slow = 14, .p2_fast = 7 }, | 160 | .p2_slow = 14, .p2_fast = 7 }, |
160 | .find_pll = intel_find_best_PLL, | 161 | .find_pll = intel_find_best_PLL, |
@@ -222,44 +223,44 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | |||
222 | }; | 223 | }; |
223 | 224 | ||
224 | static const intel_limit_t intel_limits_g4x_display_port = { | 225 | static const intel_limit_t intel_limits_g4x_display_port = { |
225 | .dot = { .min = 161670, .max = 227000 }, | 226 | .dot = { .min = 161670, .max = 227000 }, |
226 | .vco = { .min = 1750000, .max = 3500000}, | 227 | .vco = { .min = 1750000, .max = 3500000}, |
227 | .n = { .min = 1, .max = 2 }, | 228 | .n = { .min = 1, .max = 2 }, |
228 | .m = { .min = 97, .max = 108 }, | 229 | .m = { .min = 97, .max = 108 }, |
229 | .m1 = { .min = 0x10, .max = 0x12 }, | 230 | .m1 = { .min = 0x10, .max = 0x12 }, |
230 | .m2 = { .min = 0x05, .max = 0x06 }, | 231 | .m2 = { .min = 0x05, .max = 0x06 }, |
231 | .p = { .min = 10, .max = 20 }, | 232 | .p = { .min = 10, .max = 20 }, |
232 | .p1 = { .min = 1, .max = 2}, | 233 | .p1 = { .min = 1, .max = 2}, |
233 | .p2 = { .dot_limit = 0, | 234 | .p2 = { .dot_limit = 0, |
234 | .p2_slow = 10, .p2_fast = 10 }, | 235 | .p2_slow = 10, .p2_fast = 10 }, |
235 | .find_pll = intel_find_pll_g4x_dp, | 236 | .find_pll = intel_find_pll_g4x_dp, |
236 | }; | 237 | }; |
237 | 238 | ||
238 | static const intel_limit_t intel_limits_pineview_sdvo = { | 239 | static const intel_limit_t intel_limits_pineview_sdvo = { |
239 | .dot = { .min = 20000, .max = 400000}, | 240 | .dot = { .min = 20000, .max = 400000}, |
240 | .vco = { .min = 1700000, .max = 3500000 }, | 241 | .vco = { .min = 1700000, .max = 3500000 }, |
241 | /* Pineview's Ncounter is a ring counter */ | 242 | /* Pineview's Ncounter is a ring counter */ |
242 | .n = { .min = 3, .max = 6 }, | 243 | .n = { .min = 3, .max = 6 }, |
243 | .m = { .min = 2, .max = 256 }, | 244 | .m = { .min = 2, .max = 256 }, |
244 | /* Pineview only has one combined m divider, which we treat as m2. */ | 245 | /* Pineview only has one combined m divider, which we treat as m2. */ |
245 | .m1 = { .min = 0, .max = 0 }, | 246 | .m1 = { .min = 0, .max = 0 }, |
246 | .m2 = { .min = 0, .max = 254 }, | 247 | .m2 = { .min = 0, .max = 254 }, |
247 | .p = { .min = 5, .max = 80 }, | 248 | .p = { .min = 5, .max = 80 }, |
248 | .p1 = { .min = 1, .max = 8 }, | 249 | .p1 = { .min = 1, .max = 8 }, |
249 | .p2 = { .dot_limit = 200000, | 250 | .p2 = { .dot_limit = 200000, |
250 | .p2_slow = 10, .p2_fast = 5 }, | 251 | .p2_slow = 10, .p2_fast = 5 }, |
251 | .find_pll = intel_find_best_PLL, | 252 | .find_pll = intel_find_best_PLL, |
252 | }; | 253 | }; |
253 | 254 | ||
254 | static const intel_limit_t intel_limits_pineview_lvds = { | 255 | static const intel_limit_t intel_limits_pineview_lvds = { |
255 | .dot = { .min = 20000, .max = 400000 }, | 256 | .dot = { .min = 20000, .max = 400000 }, |
256 | .vco = { .min = 1700000, .max = 3500000 }, | 257 | .vco = { .min = 1700000, .max = 3500000 }, |
257 | .n = { .min = 3, .max = 6 }, | 258 | .n = { .min = 3, .max = 6 }, |
258 | .m = { .min = 2, .max = 256 }, | 259 | .m = { .min = 2, .max = 256 }, |
259 | .m1 = { .min = 0, .max = 0 }, | 260 | .m1 = { .min = 0, .max = 0 }, |
260 | .m2 = { .min = 0, .max = 254 }, | 261 | .m2 = { .min = 0, .max = 254 }, |
261 | .p = { .min = 7, .max = 112 }, | 262 | .p = { .min = 7, .max = 112 }, |
262 | .p1 = { .min = 1, .max = 8 }, | 263 | .p1 = { .min = 1, .max = 8 }, |
263 | .p2 = { .dot_limit = 112000, | 264 | .p2 = { .dot_limit = 112000, |
264 | .p2_slow = 14, .p2_fast = 14 }, | 265 | .p2_slow = 14, .p2_fast = 14 }, |
265 | .find_pll = intel_find_best_PLL, | 266 | .find_pll = intel_find_best_PLL, |
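The limit tables above encode, per platform and output type, the legal range for every divider in the DPLL chain. They feed a search that combines the dividers into a dot clock; a minimal sketch of that arithmetic, assuming the i9xx m1/m2 convention used by the clock helper elsewhere in this file (Pineview folds everything into a single m2, which is why its tables pin m1 to 0):

```c
/*
 * Minimal sketch of the divider arithmetic the limit tables constrain,
 * assuming the i9xx m1/m2 convention used by the clock helper elsewhere
 * in this file.  Pineview folds everything into a single m2, which is
 * why its tables pin m1 to 0.
 */
struct sketch_clock {
	int n, m1, m2, p1, p2;		/* searched within the table ranges */
	int m, p, vco, dot;		/* derived */
};

static void sketch_clock_compute(struct sketch_clock *c, int refclk)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	c->vco = refclk * c->m / (c->n + 2);
	c->dot = c->vco / c->p;
}
```

A candidate is kept only if every derived value also lands inside its table range, which is exactly what intel_PLL_is_valid() below checks.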
@@ -321,7 +322,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { | |||
321 | .m1 = { .min = 12, .max = 22 }, | 322 | .m1 = { .min = 12, .max = 22 }, |
322 | .m2 = { .min = 5, .max = 9 }, | 323 | .m2 = { .min = 5, .max = 9 }, |
323 | .p = { .min = 28, .max = 112 }, | 324 | .p = { .min = 28, .max = 112 }, |
324 | .p1 = { .min = 2,.max = 8 }, | 325 | .p1 = { .min = 2, .max = 8 }, |
325 | .p2 = { .dot_limit = 225000, | 326 | .p2 = { .dot_limit = 225000, |
326 | .p2_slow = 14, .p2_fast = 14 }, | 327 | .p2_slow = 14, .p2_fast = 14 }, |
327 | .find_pll = intel_g4x_find_best_PLL, | 328 | .find_pll = intel_g4x_find_best_PLL, |
@@ -335,24 +336,24 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | |||
335 | .m1 = { .min = 12, .max = 22 }, | 336 | .m1 = { .min = 12, .max = 22 }, |
336 | .m2 = { .min = 5, .max = 9 }, | 337 | .m2 = { .min = 5, .max = 9 }, |
337 | .p = { .min = 14, .max = 42 }, | 338 | .p = { .min = 14, .max = 42 }, |
338 | .p1 = { .min = 2,.max = 6 }, | 339 | .p1 = { .min = 2, .max = 6 }, |
339 | .p2 = { .dot_limit = 225000, | 340 | .p2 = { .dot_limit = 225000, |
340 | .p2_slow = 7, .p2_fast = 7 }, | 341 | .p2_slow = 7, .p2_fast = 7 }, |
341 | .find_pll = intel_g4x_find_best_PLL, | 342 | .find_pll = intel_g4x_find_best_PLL, |
342 | }; | 343 | }; |
343 | 344 | ||
344 | static const intel_limit_t intel_limits_ironlake_display_port = { | 345 | static const intel_limit_t intel_limits_ironlake_display_port = { |
345 | .dot = { .min = 25000, .max = 350000 }, | 346 | .dot = { .min = 25000, .max = 350000 }, |
346 | .vco = { .min = 1760000, .max = 3510000}, | 347 | .vco = { .min = 1760000, .max = 3510000}, |
347 | .n = { .min = 1, .max = 2 }, | 348 | .n = { .min = 1, .max = 2 }, |
348 | .m = { .min = 81, .max = 90 }, | 349 | .m = { .min = 81, .max = 90 }, |
349 | .m1 = { .min = 12, .max = 22 }, | 350 | .m1 = { .min = 12, .max = 22 }, |
350 | .m2 = { .min = 5, .max = 9 }, | 351 | .m2 = { .min = 5, .max = 9 }, |
351 | .p = { .min = 10, .max = 20 }, | 352 | .p = { .min = 10, .max = 20 }, |
352 | .p1 = { .min = 1, .max = 2}, | 353 | .p1 = { .min = 1, .max = 2}, |
353 | .p2 = { .dot_limit = 0, | 354 | .p2 = { .dot_limit = 0, |
354 | .p2_slow = 10, .p2_fast = 10 }, | 355 | .p2_slow = 10, .p2_fast = 10 }, |
355 | .find_pll = intel_find_pll_ironlake_dp, | 356 | .find_pll = intel_find_pll_ironlake_dp, |
356 | }; | 357 | }; |
357 | 358 | ||
358 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, | 359 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
@@ -404,7 +405,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
404 | limit = &intel_limits_g4x_hdmi; | 405 | limit = &intel_limits_g4x_hdmi; |
405 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { | 406 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
406 | limit = &intel_limits_g4x_sdvo; | 407 | limit = &intel_limits_g4x_sdvo; |
407 | } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 408 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
408 | limit = &intel_limits_g4x_display_port; | 409 | limit = &intel_limits_g4x_display_port; |
409 | } else /* The option is for other outputs */ | 410 | } else /* The option is for other outputs */ |
410 | limit = &intel_limits_i9xx_sdvo; | 411 | limit = &intel_limits_i9xx_sdvo; |
@@ -488,26 +489,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev, | |||
488 | const intel_clock_t *clock) | 489 | const intel_clock_t *clock) |
489 | { | 490 | { |
490 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 491 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
491 | INTELPllInvalid ("p1 out of range\n"); | 492 | INTELPllInvalid("p1 out of range\n"); |
492 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 493 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
493 | INTELPllInvalid ("p out of range\n"); | 494 | INTELPllInvalid("p out of range\n"); |
494 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) | 495 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) |
495 | INTELPllInvalid ("m2 out of range\n"); | 496 | INTELPllInvalid("m2 out of range\n"); |
496 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | 497 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
497 | INTELPllInvalid ("m1 out of range\n"); | 498 | INTELPllInvalid("m1 out of range\n"); |
498 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) | 499 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) |
499 | INTELPllInvalid ("m1 <= m2\n"); | 500 | INTELPllInvalid("m1 <= m2\n"); |
500 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 501 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
501 | INTELPllInvalid ("m out of range\n"); | 502 | INTELPllInvalid("m out of range\n"); |
502 | if (clock->n < limit->n.min || limit->n.max < clock->n) | 503 | if (clock->n < limit->n.min || limit->n.max < clock->n) |
503 | INTELPllInvalid ("n out of range\n"); | 504 | INTELPllInvalid("n out of range\n"); |
504 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) | 505 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) |
505 | INTELPllInvalid ("vco out of range\n"); | 506 | INTELPllInvalid("vco out of range\n"); |
506 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, | 507 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, |
507 | * connector, etc., rather than just a single range. | 508 | * connector, etc., rather than just a single range. |
508 | */ | 509 | */ |
509 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) | 510 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) |
510 | INTELPllInvalid ("dot out of range\n"); | 511 | INTELPllInvalid("dot out of range\n"); |
511 | 512 | ||
512 | return true; | 513 | return true; |
513 | } | 514 | } |
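intel_PLL_is_valid() reads naturally once you know INTELPllInvalid is a macro that returns false out of the enclosing function rather than an ordinary call; the whitespace cleanup in this hunk just makes the invocations look like normal calls. Its definition is outside this diff, so the following is only a sketch of its shape:

```c
/*
 * Sketch of the macro intel_PLL_is_valid() leans on; the real definition
 * lives earlier in intel_display.c.  The point is that it returns false
 * from the *calling* function, so each check above is an early exit.
 */
#define INTELPllInvalid(s)		\
do {					\
	/* some versions log s here */	\
	return false;			\
} while (0)
```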
@@ -542,7 +543,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
542 | clock.p2 = limit->p2.p2_fast; | 543 | clock.p2 = limit->p2.p2_fast; |
543 | } | 544 | } |
544 | 545 | ||
545 | memset (best_clock, 0, sizeof (*best_clock)); | 546 | memset(best_clock, 0, sizeof(*best_clock)); |
546 | 547 | ||
547 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; | 548 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
548 | clock.m1++) { | 549 | clock.m1++) { |
@@ -802,6 +803,19 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv, | |||
802 | u32 val; | 803 | u32 val; |
803 | bool cur_state; | 804 | bool cur_state; |
804 | 805 | ||
806 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
807 | u32 pch_dpll; | ||
808 | |||
809 | pch_dpll = I915_READ(PCH_DPLL_SEL); | ||
810 | |||
811 | /* Make sure the selected PLL is enabled for the transcoder */ ||
812 | WARN(!((pch_dpll >> (4 * pipe)) & 8), | ||
813 | "transcoder %d PLL not enabled\n", pipe); | ||
814 | |||
815 | /* Convert the transcoder pipe number to a pll pipe number */ | ||
816 | pipe = (pch_dpll >> (4 * pipe)) & 1; | ||
817 | } | ||
818 | |||
805 | reg = PCH_DPLL(pipe); | 819 | reg = PCH_DPLL(pipe); |
806 | val = I915_READ(reg); | 820 | val = I915_READ(reg); |
807 | cur_state = !!(val & DPLL_VCO_ENABLE); | 821 | cur_state = !!(val & DPLL_VCO_ENABLE); |
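The new CPT branch decodes PCH_DPLL_SEL, which packs one 4-bit field per transcoder: judging from the shifts and masks above, bit 3 of each field is the transcoder's PLL enable and bit 0 selects DPLL A or B. A hypothetical helper makes that layout explicit; the two hunks that follow add the matching guards (transcoder C has no PLL of its own, so enable/disable requests for pipe > 1 simply return):

```c
/*
 * Hypothetical helper mirroring the decode in assert_pch_pll(): each
 * transcoder owns a nibble of PCH_DPLL_SEL, with the PLL enable in
 * bit 3 and the DPLL A/B select in bit 0 (as implied by the shifts
 * and masks above).
 */
static int pch_dpll_for_transcoder(u32 pch_dpll_sel, int pipe, bool *enabled)
{
	u32 field = (pch_dpll_sel >> (4 * pipe)) & 0xf;

	*enabled = field & 8;	/* transcoder's PLL enable bit */
	return field & 1;	/* 0 = DPLL A, 1 = DPLL B */
}
```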
@@ -1171,6 +1185,9 @@ static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, | |||
1171 | int reg; | 1185 | int reg; |
1172 | u32 val; | 1186 | u32 val; |
1173 | 1187 | ||
1188 | if (pipe > 1) | ||
1189 | return; | ||
1190 | |||
1174 | /* PCH only available on ILK+ */ | 1191 | /* PCH only available on ILK+ */ |
1175 | BUG_ON(dev_priv->info->gen < 5); | 1192 | BUG_ON(dev_priv->info->gen < 5); |
1176 | 1193 | ||
@@ -1191,6 +1208,9 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, | |||
1191 | int reg; | 1208 | int reg; |
1192 | u32 val; | 1209 | u32 val; |
1193 | 1210 | ||
1211 | if (pipe > 1) | ||
1212 | return; | ||
1213 | |||
1194 | /* PCH only available on ILK+ */ | 1214 | /* PCH only available on ILK+ */ |
1195 | BUG_ON(dev_priv->info->gen < 5); | 1215 | BUG_ON(dev_priv->info->gen < 5); |
1196 | 1216 | ||
@@ -1256,7 +1276,7 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | |||
1256 | I915_WRITE(reg, val); | 1276 | I915_WRITE(reg, val); |
1257 | /* wait for PCH transcoder off, transcoder state */ | 1277 | /* wait for PCH transcoder off, transcoder state */ |
1258 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | 1278 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
1259 | DRM_ERROR("failed to disable transcoder\n"); | 1279 | DRM_ERROR("failed to disable transcoder %d\n", pipe); |
1260 | } | 1280 | } |
1261 | 1281 | ||
1262 | /** | 1282 | /** |
@@ -2085,6 +2105,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | |||
2085 | switch (plane) { | 2105 | switch (plane) { |
2086 | case 0: | 2106 | case 0: |
2087 | case 1: | 2107 | case 1: |
2108 | case 2: | ||
2088 | break; | 2109 | break; |
2089 | default: | 2110 | default: |
2090 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | 2111 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
@@ -2184,6 +2205,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2184 | case 0: | 2205 | case 0: |
2185 | case 1: | 2206 | case 1: |
2186 | break; | 2207 | break; |
2208 | case 2: | ||
2209 | if (IS_IVYBRIDGE(dev)) | ||
2210 | break; | ||
2211 | /* fall through otherwise */ | ||
2187 | default: | 2212 | default: |
2188 | DRM_ERROR("no plane for crtc\n"); | 2213 | DRM_ERROR("no plane for crtc\n"); |
2189 | return -EINVAL; | 2214 | return -EINVAL; |
@@ -2440,7 +2465,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
2440 | 2465 | ||
2441 | } | 2466 | } |
2442 | 2467 | ||
2443 | static const int snb_b_fdi_train_param [] = { | 2468 | static const int snb_b_fdi_train_param[] = { |
2444 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, | 2469 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
2445 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, | 2470 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
2446 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, | 2471 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
@@ -2496,7 +2521,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
2496 | if (HAS_PCH_CPT(dev)) | 2521 | if (HAS_PCH_CPT(dev)) |
2497 | cpt_phase_pointer_enable(dev, pipe); | 2522 | cpt_phase_pointer_enable(dev, pipe); |
2498 | 2523 | ||
2499 | for (i = 0; i < 4; i++ ) { | 2524 | for (i = 0; i < 4; i++) { |
2500 | reg = FDI_TX_CTL(pipe); | 2525 | reg = FDI_TX_CTL(pipe); |
2501 | temp = I915_READ(reg); | 2526 | temp = I915_READ(reg); |
2502 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2527 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
@@ -2545,7 +2570,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
2545 | POSTING_READ(reg); | 2570 | POSTING_READ(reg); |
2546 | udelay(150); | 2571 | udelay(150); |
2547 | 2572 | ||
2548 | for (i = 0; i < 4; i++ ) { | 2573 | for (i = 0; i < 4; i++) { |
2549 | reg = FDI_TX_CTL(pipe); | 2574 | reg = FDI_TX_CTL(pipe); |
2550 | temp = I915_READ(reg); | 2575 | temp = I915_READ(reg); |
2551 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2576 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
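Both gen6 training loops walk snb_b_fdi_train_param[], the four voltage-swing/pre-emphasis combinations the SNB B-step FDI transmitter supports: program the next combination, wait, then check whether the receiver reports lock. A condensed sketch of that retry pattern, with delays simplified and error paths trimmed:

```c
/*
 * Condensed sketch of the retry pattern in gen6_fdi_link_train(): try
 * each vswing/pre-emphasis combination until the receiver reports lock.
 * 'lock_bit' would be FDI_RX_BIT_LOCK or FDI_RX_SYMBOL_LOCK depending
 * on the training phase.
 */
static bool sketch_fdi_try_train_params(struct drm_i915_private *dev_priv,
					int pipe, u32 lock_bit)
{
	int i;

	for (i = 0; i < 4; i++) {
		u32 temp = I915_READ(FDI_TX_CTL(pipe));

		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(FDI_TX_CTL(pipe), temp);

		udelay(500);

		if (I915_READ(FDI_RX_IIR(pipe)) & lock_bit)
			return true;	/* receiver locked */
	}
	return false;	/* all four combinations failed */
}
```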
@@ -2600,6 +2625,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2600 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | 2625 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2601 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2626 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2602 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2627 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2628 | temp |= FDI_COMPOSITE_SYNC; | ||
2603 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | 2629 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2604 | 2630 | ||
2605 | reg = FDI_RX_CTL(pipe); | 2631 | reg = FDI_RX_CTL(pipe); |
@@ -2607,6 +2633,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2607 | temp &= ~FDI_LINK_TRAIN_AUTO; | 2633 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2608 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2634 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2609 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | 2635 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2636 | temp |= FDI_COMPOSITE_SYNC; | ||
2610 | I915_WRITE(reg, temp | FDI_RX_ENABLE); | 2637 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2611 | 2638 | ||
2612 | POSTING_READ(reg); | 2639 | POSTING_READ(reg); |
@@ -2615,7 +2642,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2615 | if (HAS_PCH_CPT(dev)) | 2642 | if (HAS_PCH_CPT(dev)) |
2616 | cpt_phase_pointer_enable(dev, pipe); | 2643 | cpt_phase_pointer_enable(dev, pipe); |
2617 | 2644 | ||
2618 | for (i = 0; i < 4; i++ ) { | 2645 | for (i = 0; i < 4; i++) { |
2619 | reg = FDI_TX_CTL(pipe); | 2646 | reg = FDI_TX_CTL(pipe); |
2620 | temp = I915_READ(reg); | 2647 | temp = I915_READ(reg); |
2621 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2648 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
@@ -2657,7 +2684,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2657 | POSTING_READ(reg); | 2684 | POSTING_READ(reg); |
2658 | udelay(150); | 2685 | udelay(150); |
2659 | 2686 | ||
2660 | for (i = 0; i < 4; i++ ) { | 2687 | for (i = 0; i < 4; i++) { |
2661 | reg = FDI_TX_CTL(pipe); | 2688 | reg = FDI_TX_CTL(pipe); |
2662 | temp = I915_READ(reg); | 2689 | temp = I915_READ(reg); |
2663 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2690 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
@@ -2866,7 +2893,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2866 | struct drm_i915_private *dev_priv = dev->dev_private; | 2893 | struct drm_i915_private *dev_priv = dev->dev_private; |
2867 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2868 | int pipe = intel_crtc->pipe; | 2895 | int pipe = intel_crtc->pipe; |
2869 | u32 reg, temp; | 2896 | u32 reg, temp, transc_sel; |
2870 | 2897 | ||
2871 | /* For PCH output, training FDI link */ | 2898 | /* For PCH output, training FDI link */ |
2872 | dev_priv->display.fdi_link_train(crtc); | 2899 | dev_priv->display.fdi_link_train(crtc); |
@@ -2874,12 +2901,21 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2874 | intel_enable_pch_pll(dev_priv, pipe); | 2901 | intel_enable_pch_pll(dev_priv, pipe); |
2875 | 2902 | ||
2876 | if (HAS_PCH_CPT(dev)) { | 2903 | if (HAS_PCH_CPT(dev)) { |
2904 | transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : | ||
2905 | TRANSC_DPLLB_SEL; | ||
2906 | |||
2877 | /* Be sure PCH DPLL SEL is set */ | 2907 | /* Be sure PCH DPLL SEL is set */ |
2878 | temp = I915_READ(PCH_DPLL_SEL); | 2908 | temp = I915_READ(PCH_DPLL_SEL); |
2879 | if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) | 2909 | if (pipe == 0) { |
2910 | temp &= ~(TRANSA_DPLLB_SEL); | ||
2880 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | 2911 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2881 | else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) | 2912 | } else if (pipe == 1) { |
2913 | temp &= ~(TRANSB_DPLLB_SEL); | ||
2882 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 2914 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
2915 | } else if (pipe == 2) { | ||
2916 | temp &= ~(TRANSC_DPLLB_SEL); | ||
2917 | temp |= (TRANSC_DPLL_ENABLE | transc_sel); | ||
2918 | } | ||
2883 | I915_WRITE(PCH_DPLL_SEL, temp); | 2919 | I915_WRITE(PCH_DPLL_SEL, temp); |
2884 | } | 2920 | } |
2885 | 2921 | ||
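The rewritten CPT branch clears the stale DPLL-select bit before setting the new one (the old code skipped the update entirely whenever the enable bit was already set), and adds transcoder C, which borrows DPLL A or B according to use_pll_a (chosen during mode set; see the PLL-matching hunk further down). The nibble being built is the same one assert_pch_pll() decodes above; a hypothetical encoder for illustration:

```c
/*
 * Hypothetical counterpart to the decode helper sketched earlier: build
 * the PCH_DPLL_SEL nibble for one transcoder.  'pll' is 0 for DPLL A,
 * 1 for DPLL B, matching the TRANS*_DPLLA/B_SEL encoding.
 */
static u32 sketch_pch_dpll_sel_field(int transcoder, int pll)
{
	return (8u | (pll & 1)) << (4 * transcoder);
}
```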
@@ -2935,6 +2971,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2935 | intel_enable_transcoder(dev_priv, pipe); | 2971 | intel_enable_transcoder(dev_priv, pipe); |
2936 | } | 2972 | } |
2937 | 2973 | ||
2974 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) | ||
2975 | { | ||
2976 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2977 | int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); | ||
2978 | u32 temp; | ||
2979 | |||
2980 | temp = I915_READ(dslreg); | ||
2981 | udelay(500); | ||
2982 | if (wait_for(I915_READ(dslreg) != temp, 5)) { | ||
2983 | /* Without this, mode sets may fail silently on FDI */ | ||
2984 | I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
2985 | udelay(250); | ||
2986 | I915_WRITE(tc2reg, 0); | ||
2987 | if (wait_for(I915_READ(dslreg) != temp, 5)) | ||
2988 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); | ||
2989 | } | ||
2990 | } | ||
2991 | |||
2938 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2992 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2939 | { | 2993 | { |
2940 | struct drm_device *dev = crtc->dev; | 2994 | struct drm_device *dev = crtc->dev; |
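intel_cpt_verify_modeset() verifies the pipe is actually running by sampling PIPEDSL, the current-scanline register: if the value hasn't moved within the poll window, FDI auto-training has stalled the pipe, and pulsing TRANS_AUTOTRAIN_GEN_STALL_DIS in TRANS_CHICKEN2 is the workaround. The wait_for() it uses is the driver's poll-with-timeout macro; a sketch of its shape, as an assumption about a definition that lives outside this diff:

```c
/*
 * Sketch of the wait_for(COND, MS) idiom used above: poll COND until it
 * holds or MS milliseconds pass; evaluates to 0 on success, -ETIMEDOUT
 * on timeout, so "if (wait_for(...))" reads as "if it timed out".
 */
#define sketch_wait_for(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		cpu_relax();						\
	}								\
	ret__;								\
})
```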
@@ -3045,13 +3099,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3045 | temp = I915_READ(PCH_DPLL_SEL); | 3099 | temp = I915_READ(PCH_DPLL_SEL); |
3046 | switch (pipe) { | 3100 | switch (pipe) { |
3047 | case 0: | 3101 | case 0: |
3048 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | 3102 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); |
3049 | break; | 3103 | break; |
3050 | case 1: | 3104 | case 1: |
3051 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 3105 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
3052 | break; | 3106 | break; |
3053 | case 2: | 3107 | case 2: |
3054 | /* FIXME: manage transcoder PLLs? */ | 3108 | /* C shares PLL A or B */ |
3055 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); | 3109 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); |
3056 | break; | 3110 | break; |
3057 | default: | 3111 | default: |
@@ -3061,7 +3115,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3061 | } | 3115 | } |
3062 | 3116 | ||
3063 | /* disable PCH DPLL */ | 3117 | /* disable PCH DPLL */ |
3064 | intel_disable_pch_pll(dev_priv, pipe); | 3118 | if (!intel_crtc->no_pll) |
3119 | intel_disable_pch_pll(dev_priv, pipe); | ||
3065 | 3120 | ||
3066 | /* Switch from PCDclk to Rawclk */ | 3121 | /* Switch from PCDclk to Rawclk */ |
3067 | reg = FDI_RX_CTL(pipe); | 3122 | reg = FDI_RX_CTL(pipe); |
@@ -3293,18 +3348,25 @@ static void ironlake_crtc_commit(struct drm_crtc *crtc) | |||
3293 | ironlake_crtc_enable(crtc); | 3348 | ironlake_crtc_enable(crtc); |
3294 | } | 3349 | } |
3295 | 3350 | ||
3296 | void intel_encoder_prepare (struct drm_encoder *encoder) | 3351 | void intel_encoder_prepare(struct drm_encoder *encoder) |
3297 | { | 3352 | { |
3298 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3353 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3299 | /* lvds has its own version of prepare see intel_lvds_prepare */ | 3354 | /* lvds has its own version of prepare see intel_lvds_prepare */ |
3300 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); | 3355 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); |
3301 | } | 3356 | } |
3302 | 3357 | ||
3303 | void intel_encoder_commit (struct drm_encoder *encoder) | 3358 | void intel_encoder_commit(struct drm_encoder *encoder) |
3304 | { | 3359 | { |
3305 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3360 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3361 | struct drm_device *dev = encoder->dev; | ||
3362 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
3363 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); | ||
3364 | |||
3306 | /* lvds has its own version of commit see intel_lvds_commit */ | 3365 | /* lvds has its own version of commit see intel_lvds_commit */ |
3307 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | 3366 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
3367 | |||
3368 | if (HAS_PCH_CPT(dev)) | ||
3369 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); | ||
3308 | } | 3370 | } |
3309 | 3371 | ||
3310 | void intel_encoder_destroy(struct drm_encoder *encoder) | 3372 | void intel_encoder_destroy(struct drm_encoder *encoder) |
@@ -4478,6 +4540,20 @@ static void sandybridge_update_wm(struct drm_device *dev) | |||
4478 | enabled |= 2; | 4540 | enabled |= 2; |
4479 | } | 4541 | } |
4480 | 4542 | ||
4543 | /* IVB has 3 pipes */ | ||
4544 | if (IS_IVYBRIDGE(dev) && | ||
4545 | g4x_compute_wm0(dev, 2, | ||
4546 | &sandybridge_display_wm_info, latency, | ||
4547 | &sandybridge_cursor_wm_info, latency, | ||
4548 | &plane_wm, &cursor_wm)) { | ||
4549 | I915_WRITE(WM0_PIPEC_IVB, | ||
4550 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4551 | DRM_DEBUG_KMS("FIFO watermarks for pipe C -" ||
4552 | " plane %d, cursor: %d\n", | ||
4553 | plane_wm, cursor_wm); | ||
4554 | enabled |= 3; | ||
4555 | } | ||
4556 | |||
4481 | /* | 4557 | /* |
4482 | * Calculate and update the self-refresh watermark only when one | 4558 | * Calculate and update the self-refresh watermark only when one |
4483 | * display plane is used. | 4559 | * display plane is used. |
@@ -4584,7 +4660,9 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
4584 | 4660 | ||
4585 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | 4661 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4586 | { | 4662 | { |
4587 | return dev_priv->lvds_use_ssc && i915_panel_use_ssc | 4663 | if (i915_panel_use_ssc >= 0) |
4664 | return i915_panel_use_ssc != 0; | ||
4665 | return dev_priv->lvds_use_ssc | ||
4588 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); | 4666 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4589 | } | 4667 | } |
4590 | 4668 | ||
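intel_panel_use_ssc() now treats the i915_panel_use_ssc module parameter as a tristate: negative means follow the VBT value and quirks, zero forces spread-spectrum clocking off, positive forces it on. A minimal standalone sketch of the pattern, with illustrative names rather than the driver's:

```c
/*
 * Minimal sketch of the tristate-override pattern: a module parameter
 * defaulting to -1 ("auto") that can force a BIOS-derived default
 * either way.
 */
static int force_ssc = -1;	/* <0 = follow BIOS/quirks, 0 = off, >0 = on */

static bool sketch_use_ssc(bool bios_default, bool quirk_disabled)
{
	if (force_ssc >= 0)
		return force_ssc != 0;
	return bios_default && !quirk_disabled;
}
```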
@@ -5107,36 +5185,52 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5107 | return ret; | 5185 | return ret; |
5108 | } | 5186 | } |
5109 | 5187 | ||
5110 | static void ironlake_update_pch_refclk(struct drm_device *dev) | 5188 | /* |
5189 | * Initialize reference clocks when the driver loads | ||
5190 | */ | ||
5191 | void ironlake_init_pch_refclk(struct drm_device *dev) | ||
5111 | { | 5192 | { |
5112 | struct drm_i915_private *dev_priv = dev->dev_private; | 5193 | struct drm_i915_private *dev_priv = dev->dev_private; |
5113 | struct drm_mode_config *mode_config = &dev->mode_config; | 5194 | struct drm_mode_config *mode_config = &dev->mode_config; |
5114 | struct drm_crtc *crtc; | ||
5115 | struct intel_encoder *encoder; | 5195 | struct intel_encoder *encoder; |
5116 | struct intel_encoder *has_edp_encoder = NULL; | ||
5117 | u32 temp; | 5196 | u32 temp; |
5118 | bool has_lvds = false; | 5197 | bool has_lvds = false; |
5198 | bool has_cpu_edp = false; | ||
5199 | bool has_pch_edp = false; | ||
5200 | bool has_panel = false; | ||
5201 | bool has_ck505 = false; | ||
5202 | bool can_ssc = false; | ||
5119 | 5203 | ||
5120 | /* We need to take the global config into account */ | 5204 | /* We need to take the global config into account */ |
5121 | list_for_each_entry(crtc, &mode_config->crtc_list, head) { | 5205 | list_for_each_entry(encoder, &mode_config->encoder_list, |
5122 | if (!crtc->enabled) | 5206 | base.head) { |
5123 | continue; | 5207 | switch (encoder->type) { |
5124 | 5208 | case INTEL_OUTPUT_LVDS: | |
5125 | list_for_each_entry(encoder, &mode_config->encoder_list, | 5209 | has_panel = true; |
5126 | base.head) { | 5210 | has_lvds = true; |
5127 | if (encoder->base.crtc != crtc) | 5211 | break; |
5128 | continue; | 5212 | case INTEL_OUTPUT_EDP: |
5129 | 5213 | has_panel = true; | |
5130 | switch (encoder->type) { | 5214 | if (intel_encoder_is_pch_edp(&encoder->base)) |
5131 | case INTEL_OUTPUT_LVDS: | 5215 | has_pch_edp = true; |
5132 | has_lvds = true; | 5216 | else |
5133 | case INTEL_OUTPUT_EDP: | 5217 | has_cpu_edp = true; |
5134 | has_edp_encoder = encoder; | 5218 | break; |
5135 | break; | ||
5136 | } | ||
5137 | } | 5219 | } |
5138 | } | 5220 | } |
5139 | 5221 | ||
5222 | if (HAS_PCH_IBX(dev)) { | ||
5223 | has_ck505 = dev_priv->display_clock_mode; | ||
5224 | can_ssc = has_ck505; | ||
5225 | } else { | ||
5226 | has_ck505 = false; | ||
5227 | can_ssc = true; | ||
5228 | } | ||
5229 | |||
5230 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", | ||
5231 | has_panel, has_lvds, has_pch_edp, has_cpu_edp, | ||
5232 | has_ck505); | ||
5233 | |||
5140 | /* Ironlake: try to setup display ref clock before DPLL | 5234 | /* Ironlake: try to setup display ref clock before DPLL |
5141 | * enabling. This is only under driver's control after | 5235 | * enabling. This is only under driver's control after |
5142 | * PCH B stepping, previous chipset stepping should be | 5236 | * PCH B stepping, previous chipset stepping should be |
@@ -5145,43 +5239,102 @@ static void ironlake_update_pch_refclk(struct drm_device *dev) | |||
5145 | temp = I915_READ(PCH_DREF_CONTROL); | 5239 | temp = I915_READ(PCH_DREF_CONTROL); |
5146 | /* Always enable nonspread source */ | 5240 | /* Always enable nonspread source */ |
5147 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | 5241 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; |
5148 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5149 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5150 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5151 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5152 | 5242 | ||
5153 | POSTING_READ(PCH_DREF_CONTROL); | 5243 | if (has_ck505) |
5154 | udelay(200); | 5244 | temp |= DREF_NONSPREAD_CK505_ENABLE; |
5245 | else | ||
5246 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5155 | 5247 | ||
5156 | if (has_edp_encoder) { | 5248 | if (has_panel) { |
5157 | if (intel_panel_use_ssc(dev_priv)) { | 5249 | temp &= ~DREF_SSC_SOURCE_MASK; |
5158 | temp |= DREF_SSC1_ENABLE; | 5250 | temp |= DREF_SSC_SOURCE_ENABLE; |
5159 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5160 | 5251 | ||
5161 | POSTING_READ(PCH_DREF_CONTROL); | 5252 | /* SSC must be turned on before enabling the CPU output */ |
5162 | udelay(200); | 5253 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5254 | DRM_DEBUG_KMS("Using SSC on panel\n"); | ||
5255 | temp |= DREF_SSC1_ENABLE; | ||
5163 | } | 5256 | } |
5257 | |||
5258 | /* Get SSC going before enabling the outputs */ | ||
5259 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5260 | POSTING_READ(PCH_DREF_CONTROL); | ||
5261 | udelay(200); | ||
5262 | |||
5164 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 5263 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5165 | 5264 | ||
5166 | /* Enable CPU source on CPU attached eDP */ | 5265 | /* Enable CPU source on CPU attached eDP */ |
5167 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5266 | if (has_cpu_edp) { |
5168 | if (intel_panel_use_ssc(dev_priv)) | 5267 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5268 | DRM_DEBUG_KMS("Using SSC on eDP\n"); | ||
5169 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | 5269 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
5270 | } | ||
5170 | else | 5271 | else |
5171 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 5272 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
5172 | } else { | 5273 | } else |
5173 | /* Enable SSC on PCH eDP if needed */ | 5274 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
5174 | if (intel_panel_use_ssc(dev_priv)) { | 5275 | |
5175 | DRM_ERROR("enabling SSC on PCH\n"); | 5276 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5176 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | 5277 | POSTING_READ(PCH_DREF_CONTROL); |
5177 | } | 5278 | udelay(200); |
5178 | } | 5279 | } else { |
5280 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); | ||
5281 | |||
5282 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5283 | |||
5284 | /* Turn off CPU output */ | ||
5285 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | ||
5286 | |||
5287 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5288 | POSTING_READ(PCH_DREF_CONTROL); | ||
5289 | udelay(200); | ||
5290 | |||
5291 | /* Turn off the SSC source */ | ||
5292 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5293 | temp |= DREF_SSC_SOURCE_DISABLE; | ||
5294 | |||
5295 | /* Turn off SSC1 */ | ||
5296 | temp &= ~DREF_SSC1_ENABLE; ||
5297 | |||
5179 | I915_WRITE(PCH_DREF_CONTROL, temp); | 5298 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5180 | POSTING_READ(PCH_DREF_CONTROL); | 5299 | POSTING_READ(PCH_DREF_CONTROL); |
5181 | udelay(200); | 5300 | udelay(200); |
5182 | } | 5301 | } |
5183 | } | 5302 | } |
5184 | 5303 | ||
5304 | static int ironlake_get_refclk(struct drm_crtc *crtc) | ||
5305 | { | ||
5306 | struct drm_device *dev = crtc->dev; | ||
5307 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5308 | struct intel_encoder *encoder; | ||
5309 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5310 | struct intel_encoder *edp_encoder = NULL; | ||
5311 | int num_connectors = 0; | ||
5312 | bool is_lvds = false; | ||
5313 | |||
5314 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
5315 | if (encoder->base.crtc != crtc) | ||
5316 | continue; | ||
5317 | |||
5318 | switch (encoder->type) { | ||
5319 | case INTEL_OUTPUT_LVDS: | ||
5320 | is_lvds = true; | ||
5321 | break; | ||
5322 | case INTEL_OUTPUT_EDP: | ||
5323 | edp_encoder = encoder; | ||
5324 | break; | ||
5325 | } | ||
5326 | num_connectors++; | ||
5327 | } | ||
5328 | |||
5329 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | ||
5330 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
5331 | dev_priv->lvds_ssc_freq); | ||
5332 | return dev_priv->lvds_ssc_freq * 1000; | ||
5333 | } | ||
5334 | |||
5335 | return 120000; | ||
5336 | } | ||
5337 | |||
5185 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 5338 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5186 | struct drm_display_mode *mode, | 5339 | struct drm_display_mode *mode, |
5187 | struct drm_display_mode *adjusted_mode, | 5340 | struct drm_display_mode *adjusted_mode, |
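Two related changes land here: reference-clock setup moves out of the per-mode-set path into ironlake_init_pch_refclk(), keyed off which encoder types exist rather than which CRTCs happen to be enabled, and ironlake_get_refclk() centralizes the per-CRTC choice between the 120 MHz PCH reference and the VBT's SSC frequency. Note the write / POSTING_READ / udelay(200) triplet repeated at every stage: each PCH_DREF_CONTROL change needs its posted write flushed and time to settle before the next stage is programmed. As a helper-shaped sketch of that idiom:

```c
/*
 * Sketch of the settle idiom repeated at each PCH_DREF_CONTROL stage:
 * flush the posted write, then give the clock tree ~200us before the
 * next stage is programmed.
 */
static void sketch_dref_write_settle(struct drm_i915_private *dev_priv, u32 val)
{
	I915_WRITE(PCH_DREF_CONTROL, val);
	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);
}
```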
@@ -5241,16 +5394,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5241 | num_connectors++; | 5394 | num_connectors++; |
5242 | } | 5395 | } |
5243 | 5396 | ||
5244 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 5397 | refclk = ironlake_get_refclk(crtc); |
5245 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
5246 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
5247 | refclk / 1000); | ||
5248 | } else { | ||
5249 | refclk = 96000; | ||
5250 | if (!has_edp_encoder || | ||
5251 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
5252 | refclk = 120000; /* 120Mhz refclk */ | ||
5253 | } | ||
5254 | 5398 | ||
5255 | /* | 5399 | /* |
5256 | * Returns a set of divisors for the desired target clock with the given | 5400 | * Returns a set of divisors for the desired target clock with the given |
@@ -5377,8 +5521,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5377 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, | 5521 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5378 | &m_n); | 5522 | &m_n); |
5379 | 5523 | ||
5380 | ironlake_update_pch_refclk(dev); | ||
5381 | |||
5382 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 5524 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5383 | if (has_reduced_clock) | 5525 | if (has_reduced_clock) |
5384 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | 5526 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
@@ -5450,39 +5592,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5450 | /* Set up the display plane register */ | 5592 | /* Set up the display plane register */ |
5451 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 5593 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5452 | 5594 | ||
5453 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 5595 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5454 | drm_mode_debug_printmodeline(mode); | 5596 | drm_mode_debug_printmodeline(mode); |
5455 | 5597 | ||
5456 | /* PCH eDP needs FDI, but CPU eDP does not */ | 5598 | /* PCH eDP needs FDI, but CPU eDP does not */ |
5457 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5599 | if (!intel_crtc->no_pll) { |
5458 | I915_WRITE(PCH_FP0(pipe), fp); | 5600 | if (!has_edp_encoder || |
5459 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); | 5601 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5460 | 5602 | I915_WRITE(PCH_FP0(pipe), fp); | |
5461 | POSTING_READ(PCH_DPLL(pipe)); | 5603 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5462 | udelay(150); | ||
5463 | } | ||
5464 | 5604 | ||
5465 | /* enable transcoder DPLL */ | 5605 | POSTING_READ(PCH_DPLL(pipe)); |
5466 | if (HAS_PCH_CPT(dev)) { | 5606 | udelay(150); |
5467 | temp = I915_READ(PCH_DPLL_SEL); | 5607 | } |
5468 | switch (pipe) { | 5608 | } else { |
5469 | case 0: | 5609 | if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && |
5470 | temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; | 5610 | fp == I915_READ(PCH_FP0(0))) { |
5471 | break; | 5611 | intel_crtc->use_pll_a = true; |
5472 | case 1: | 5612 | DRM_DEBUG_KMS("using pipe a dpll\n"); |
5473 | temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; | 5613 | } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) && |
5474 | break; | 5614 | fp == I915_READ(PCH_FP0(1))) { |
5475 | case 2: | 5615 | intel_crtc->use_pll_a = false; |
5476 | /* FIXME: manage transcoder PLLs? */ | 5616 | DRM_DEBUG_KMS("using pipe b dpll\n"); |
5477 | temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; | 5617 | } else { |
5478 | break; | 5618 | DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n"); |
5479 | default: | 5619 | return -EINVAL; |
5480 | BUG(); | ||
5481 | } | 5620 | } |
5482 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
5483 | |||
5484 | POSTING_READ(PCH_DPLL_SEL); | ||
5485 | udelay(150); | ||
5486 | } | 5621 | } |
5487 | 5622 | ||
5488 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 5623 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
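For a CRTC without its own PLL (the no_pll flag set for Ivybridge's third pipe later in this patch), the mode set can only succeed if DPLL A or B already carries exactly the configuration this mode needs. The 0x7fffffff mask strips bit 31, which is DPLL_VCO_ENABLE if the usual i915 register layout holds, so a PLL that is already running still compares equal to the freshly computed value. A sketch of the comparison:

```c
/*
 * Sketch of the comparison above.  Assumes DPLL_VCO_ENABLE is bit 31,
 * per the usual i915 register layout; masking it out lets a PLL that
 * is already enabled match a dpll value computed with the bit clear.
 */
static bool sketch_pll_config_matches(u32 dpll_want, u32 fp_want,
				      u32 dpll_reg, u32 fp0_reg)
{
	return dpll_want == (dpll_reg & 0x7fffffff) && fp_want == fp0_reg;
}
```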
@@ -5492,17 +5627,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5492 | if (is_lvds) { | 5627 | if (is_lvds) { |
5493 | temp = I915_READ(PCH_LVDS); | 5628 | temp = I915_READ(PCH_LVDS); |
5494 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | 5629 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
5495 | if (pipe == 1) { | 5630 | if (HAS_PCH_CPT(dev)) |
5496 | if (HAS_PCH_CPT(dev)) | 5631 | temp |= PORT_TRANS_SEL_CPT(pipe); |
5497 | temp |= PORT_TRANS_B_SEL_CPT; | 5632 | else if (pipe == 1) |
5498 | else | 5633 | temp |= LVDS_PIPEB_SELECT; |
5499 | temp |= LVDS_PIPEB_SELECT; | 5634 | else |
5500 | } else { | 5635 | temp &= ~LVDS_PIPEB_SELECT; |
5501 | if (HAS_PCH_CPT(dev)) | 5636 | |
5502 | temp &= ~PORT_TRANS_SEL_MASK; | ||
5503 | else | ||
5504 | temp &= ~LVDS_PIPEB_SELECT; | ||
5505 | } | ||
5506 | /* set the corresponding LVDS_BORDER bit */ | 5637 | /* set the corresponding LVDS_BORDER bit */ |
5507 | temp |= dev_priv->lvds_border_bits; | 5638 | temp |= dev_priv->lvds_border_bits; |
5508 | /* Set the B0-B3 data pairs corresponding to whether we're going to | 5639 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
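On CPT the LVDS register carries a two-bit transcoder-select field rather than the single pipe-B bit older chipsets used, which is what lets the simplified branch above handle all three pipes through PORT_TRANS_SEL_CPT(pipe). The macro is defined outside this hunk; a plausible shape, stated as an assumption by analogy with other CPT port registers:

```c
/*
 * Assumed shape of the CPT transcoder-select helpers for the LVDS port
 * register (the real definitions live in i915_reg.h): a two-bit field
 * wide enough for all three transcoders.
 */
#define SKETCH_PORT_TRANS_SEL_MASK		(3 << 29)
#define SKETCH_PORT_TRANS_SEL_CPT(pipe)		((pipe) << 29)
```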
@@ -5552,8 +5683,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5552 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); | 5683 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
5553 | } | 5684 | } |
5554 | 5685 | ||
5555 | if (!has_edp_encoder || | 5686 | if (!intel_crtc->no_pll && |
5556 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5687 | (!has_edp_encoder || |
5688 | intel_encoder_is_pch_edp(&has_edp_encoder->base))) { | ||
5557 | I915_WRITE(PCH_DPLL(pipe), dpll); | 5689 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5558 | 5690 | ||
5559 | /* Wait for the clocks to stabilize. */ | 5691 | /* Wait for the clocks to stabilize. */ |
@@ -5569,18 +5701,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5569 | } | 5701 | } |
5570 | 5702 | ||
5571 | intel_crtc->lowfreq_avail = false; | 5703 | intel_crtc->lowfreq_avail = false; |
5572 | if (is_lvds && has_reduced_clock && i915_powersave) { | 5704 | if (!intel_crtc->no_pll) { |
5573 | I915_WRITE(PCH_FP1(pipe), fp2); | 5705 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5574 | intel_crtc->lowfreq_avail = true; | 5706 | I915_WRITE(PCH_FP1(pipe), fp2); |
5575 | if (HAS_PIPE_CXSR(dev)) { | 5707 | intel_crtc->lowfreq_avail = true; |
5576 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | 5708 | if (HAS_PIPE_CXSR(dev)) { |
5577 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 5709 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5578 | } | 5710 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5579 | } else { | 5711 | } |
5580 | I915_WRITE(PCH_FP1(pipe), fp); | 5712 | } else { |
5581 | if (HAS_PIPE_CXSR(dev)) { | 5713 | I915_WRITE(PCH_FP1(pipe), fp); |
5582 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | 5714 | if (HAS_PIPE_CXSR(dev)) { |
5583 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | 5715 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5716 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | ||
5717 | } | ||
5584 | } | 5718 | } |
5585 | } | 5719 | } |
5586 | 5720 | ||
@@ -5677,6 +5811,131 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5677 | return ret; | 5811 | return ret; |
5678 | } | 5812 | } |
5679 | 5813 | ||
5814 | static void g4x_write_eld(struct drm_connector *connector, | ||
5815 | struct drm_crtc *crtc) | ||
5816 | { | ||
5817 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
5818 | uint8_t *eld = connector->eld; | ||
5819 | uint32_t eldv; | ||
5820 | uint32_t len; | ||
5821 | uint32_t i; | ||
5822 | |||
5823 | i = I915_READ(G4X_AUD_VID_DID); | ||
5824 | |||
5825 | if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) | ||
5826 | eldv = G4X_ELDV_DEVCL_DEVBLC; | ||
5827 | else | ||
5828 | eldv = G4X_ELDV_DEVCTG; | ||
5829 | |||
5830 | i = I915_READ(G4X_AUD_CNTL_ST); | ||
5831 | i &= ~(eldv | G4X_ELD_ADDR); | ||
5832 | len = (i >> 9) & 0x1f; /* ELD buffer size */ | ||
5833 | I915_WRITE(G4X_AUD_CNTL_ST, i); | ||
5834 | |||
5835 | if (!eld[0]) | ||
5836 | return; | ||
5837 | |||
5838 | len = min_t(uint8_t, eld[2], len); | ||
5839 | DRM_DEBUG_DRIVER("ELD size %d\n", len); | ||
5840 | for (i = 0; i < len; i++) | ||
5841 | I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); | ||
5842 | |||
5843 | i = I915_READ(G4X_AUD_CNTL_ST); | ||
5844 | i |= eldv; | ||
5845 | I915_WRITE(G4X_AUD_CNTL_ST, i); | ||
5846 | } | ||
5847 | |||
5848 | static void ironlake_write_eld(struct drm_connector *connector, | ||
5849 | struct drm_crtc *crtc) | ||
5850 | { | ||
5851 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
5852 | uint8_t *eld = connector->eld; | ||
5853 | uint32_t eldv; | ||
5854 | uint32_t i; | ||
5855 | int len; | ||
5856 | int hdmiw_hdmiedid; | ||
5857 | int aud_cntl_st; | ||
5858 | int aud_cntrl_st2; | ||
5859 | |||
5860 | if (IS_IVYBRIDGE(connector->dev)) { | ||
5861 | hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A; | ||
5862 | aud_cntl_st = GEN7_AUD_CNTRL_ST_A; | ||
5863 | aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2; | ||
5864 | } else { | ||
5865 | hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A; | ||
5866 | aud_cntl_st = GEN5_AUD_CNTL_ST_A; | ||
5867 | aud_cntrl_st2 = GEN5_AUD_CNTL_ST2; | ||
5868 | } | ||
5869 | |||
5870 | i = to_intel_crtc(crtc)->pipe; | ||
5871 | hdmiw_hdmiedid += i * 0x100; | ||
5872 | aud_cntl_st += i * 0x100; | ||
5873 | |||
5874 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); | ||
5875 | |||
5876 | i = I915_READ(aud_cntl_st); | ||
5877 | i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ | ||
5878 | if (!i) { | ||
5879 | DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); | ||
5880 | /* operate blindly on all ports */ | ||
5881 | eldv = GEN5_ELD_VALIDB; | ||
5882 | eldv |= GEN5_ELD_VALIDB << 4; | ||
5883 | eldv |= GEN5_ELD_VALIDB << 8; | ||
5884 | } else { | ||
5885 | DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); | ||
5886 | eldv = GEN5_ELD_VALIDB << ((i - 1) * 4); | ||
5887 | } | ||
5888 | |||
5889 | i = I915_READ(aud_cntrl_st2); | ||
5890 | i &= ~eldv; | ||
5891 | I915_WRITE(aud_cntrl_st2, i); | ||
5892 | |||
5893 | if (!eld[0]) | ||
5894 | return; | ||
5895 | |||
5896 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
5897 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | ||
5898 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | ||
5899 | } | ||
5900 | |||
5901 | i = I915_READ(aud_cntl_st); | ||
5902 | i &= ~GEN5_ELD_ADDRESS; | ||
5903 | I915_WRITE(aud_cntl_st, i); | ||
5904 | |||
5905 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ | ||
5906 | DRM_DEBUG_DRIVER("ELD size %d\n", len); | ||
5907 | for (i = 0; i < len; i++) | ||
5908 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); | ||
5909 | |||
5910 | i = I915_READ(aud_cntrl_st2); | ||
5911 | i |= eldv; | ||
5912 | I915_WRITE(aud_cntrl_st2, i); | ||
5913 | } | ||
5914 | |||
5915 | void intel_write_eld(struct drm_encoder *encoder, | ||
5916 | struct drm_display_mode *mode) | ||
5917 | { | ||
5918 | struct drm_crtc *crtc = encoder->crtc; | ||
5919 | struct drm_connector *connector; | ||
5920 | struct drm_device *dev = encoder->dev; | ||
5921 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5922 | |||
5923 | connector = drm_select_eld(encoder, mode); | ||
5924 | if (!connector) | ||
5925 | return; | ||
5926 | |||
5927 | DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", | ||
5928 | connector->base.id, | ||
5929 | drm_get_connector_name(connector), | ||
5930 | connector->encoder->base.id, | ||
5931 | drm_get_encoder_name(connector->encoder)); | ||
5932 | |||
5933 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; | ||
5934 | |||
5935 | if (dev_priv->display.write_eld) | ||
5936 | dev_priv->display.write_eld(connector, crtc); | ||
5937 | } | ||
5938 | |||
5680 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ | 5939 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
5681 | void intel_crtc_load_lut(struct drm_crtc *crtc) | 5940 | void intel_crtc_load_lut(struct drm_crtc *crtc) |
5682 | { | 5941 | { |
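The ELD (EDID-Like Data) block describes the sink's audio capabilities to the HDMI/DP audio codec, and both writers above follow the same sequence: clear the ELD-valid bit, reset the buffer address (the hardware auto-increments it on each data write, which is why the loops keep writing a single data register), stream eld[2] dwords of payload into the 84-byte hardware buffer, then set valid again. Byte 5 carries the connection type, and byte 6 the audio/video sync delay in 2 ms units, hence the division by two in intel_write_eld(). A condensed sketch of the shared sequence:

```c
/*
 * Condensed sketch of the shared ELD programming sequence.  Register
 * arguments stand in for the per-platform choices above, and for
 * brevity the valid bits and the address reset are folded into a
 * single control register here.
 */
static void sketch_write_eld(struct drm_i915_private *dev_priv,
			     const uint8_t *eld,
			     int data_reg, int ctl_reg, u32 eldv)
{
	u32 i, len = min_t(uint8_t, eld[2], 21);  /* dwords; HW buffer is 84 bytes */

	/* invalidate while the buffer is rewritten */
	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~eldv);

	/* the ELD address auto-increments, so stream the payload */
	for (i = 0; i < len; i++)
		I915_WRITE(data_reg, ((const uint32_t *)eld)[i]);

	/* mark the new ELD valid for the codec */
	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | eldv);
}
```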
@@ -5758,6 +6017,31 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
5758 | I915_WRITE(CURBASE(pipe), base); | 6017 | I915_WRITE(CURBASE(pipe), base); |
5759 | } | 6018 | } |
5760 | 6019 | ||
6020 | static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) | ||
6021 | { | ||
6022 | struct drm_device *dev = crtc->dev; | ||
6023 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6024 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6025 | int pipe = intel_crtc->pipe; | ||
6026 | bool visible = base != 0; | ||
6027 | |||
6028 | if (intel_crtc->cursor_visible != visible) { | ||
6029 | uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); | ||
6030 | if (base) { | ||
6031 | cntl &= ~CURSOR_MODE; | ||
6032 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
6033 | } else { | ||
6034 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
6035 | cntl |= CURSOR_MODE_DISABLE; | ||
6036 | } | ||
6037 | I915_WRITE(CURCNTR_IVB(pipe), cntl); | ||
6038 | |||
6039 | intel_crtc->cursor_visible = visible; | ||
6040 | } | ||
6041 | /* and commit changes on next vblank */ | ||
6042 | I915_WRITE(CURBASE_IVB(pipe), base); | ||
6043 | } | ||
6044 | |||
5761 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 6045 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
5762 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, | 6046 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, |
5763 | bool on) | 6047 | bool on) |
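Ivybridge moves the cursor to its own register block (CURCNTR_IVB and friends) and drops the pipe-select bits of the i9xx path, but the latching rule the comment above notes stays the same: control and position updates only take effect once CURBASE is written, so the base write always comes last and is done unconditionally, even when only visibility changed. As a sketch of that ordering contract:

```c
/*
 * Sketch of the cursor-update ordering contract on IVB: CURBASE acts
 * as the commit trigger, so it is written last and unconditionally.
 */
static void sketch_ivb_cursor_commit(struct drm_i915_private *dev_priv,
				     int pipe, u32 cntl, u32 pos, u32 base)
{
	I915_WRITE(CURCNTR_IVB(pipe), cntl);
	I915_WRITE(CURPOS_IVB(pipe), pos);
	I915_WRITE(CURBASE_IVB(pipe), base);	/* latches on next vblank */
}
```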
@@ -5805,11 +6089,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
5805 | if (!visible && !intel_crtc->cursor_visible) | 6089 | if (!visible && !intel_crtc->cursor_visible) |
5806 | return; | 6090 | return; |
5807 | 6091 | ||
5808 | I915_WRITE(CURPOS(pipe), pos); | 6092 | if (IS_IVYBRIDGE(dev)) { |
5809 | if (IS_845G(dev) || IS_I865G(dev)) | 6093 | I915_WRITE(CURPOS_IVB(pipe), pos); |
5810 | i845_update_cursor(crtc, base); | 6094 | ivb_update_cursor(crtc, base); |
5811 | else | 6095 | } else { |
5812 | i9xx_update_cursor(crtc, base); | 6096 | I915_WRITE(CURPOS(pipe), pos); |
6097 | if (IS_845G(dev) || IS_I865G(dev)) | ||
6098 | i845_update_cursor(crtc, base); | ||
6099 | else | ||
6100 | i9xx_update_cursor(crtc, base); | ||
6101 | } | ||
5813 | 6102 | ||
5814 | if (visible) | 6103 | if (visible) |
5815 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | 6104 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); |
@@ -7071,6 +7360,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
7071 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ | 7360 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ |
7072 | 7361 | ||
7073 | if (HAS_PCH_SPLIT(dev)) { | 7362 | if (HAS_PCH_SPLIT(dev)) { |
7363 | if (pipe == 2 && IS_IVYBRIDGE(dev)) | ||
7364 | intel_crtc->no_pll = true; | ||
7074 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | 7365 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
7075 | intel_helper_funcs.commit = ironlake_crtc_commit; | 7366 | intel_helper_funcs.commit = ironlake_crtc_commit; |
7076 | } else { | 7367 | } else { |
@@ -7250,6 +7541,9 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7250 | 7541 | ||
7251 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 7542 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
7252 | drm_helper_disable_unused_functions(dev); | 7543 | drm_helper_disable_unused_functions(dev); |
7544 | |||
7545 | if (HAS_PCH_SPLIT(dev)) | ||
7546 | ironlake_init_pch_refclk(dev); | ||
7253 | } | 7547 | } |
7254 | 7548 | ||
7255 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 7549 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -7494,6 +7788,10 @@ void gen6_disable_rps(struct drm_device *dev) | |||
7494 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | 7788 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); |
7495 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | 7789 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); |
7496 | I915_WRITE(GEN6_PMIER, 0); | 7790 | I915_WRITE(GEN6_PMIER, 0); |
7791 | /* Complete PM interrupt masking here doesn't race with the rps work | ||
7792 | * item again unmasking PM interrupts because that is using a different | ||
7793 | * register (PMIMR) to mask PM interrupts. The only risk is in leaving | ||
7794 | * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ | ||
7497 | 7795 | ||
7498 | spin_lock_irq(&dev_priv->rps_lock); | 7796 | spin_lock_irq(&dev_priv->rps_lock); |
7499 | dev_priv->pm_iir = 0; | 7797 | dev_priv->pm_iir = 0; |
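The new comment pins down why this teardown doesn't race the deferred rps work item: disable only touches PMINTRMSK and PMIER, the work item only touches PMIMR, so neither can undo the other's masking; the worst case is stale PMIIR/PMIMR bits that gen6_enable_rps() clears anyway. The lines here then drop the software-latched copy under rps_lock and ack the hardware IIR. Roughly:

```c
/*
 * Rough sketch of the teardown ordering the comment describes: mask
 * the interrupt sources first (PMINTRMSK/PMIER), then clear pending
 * state under rps_lock so the work item can't observe a half-cleared
 * IIR.
 */
static void sketch_rps_quiesce(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);	/* mask all PM interrupts */
	I915_WRITE(GEN6_PMIER, 0);		/* disable PM interrupt enables */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;			/* drop the software-latched bits */
	spin_unlock_irq(&dev_priv->rps_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));	/* ack anything pending */
}
```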
@@ -8154,7 +8452,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8154 | } | 8452 | } |
8155 | 8453 | ||
8156 | /* Returns the core display clock speed */ | 8454 | /* Returns the core display clock speed */ |
8157 | if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev))) | 8455 | if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) |
8158 | dev_priv->display.get_display_clock_speed = | 8456 | dev_priv->display.get_display_clock_speed = |
8159 | i945_get_display_clock_speed; | 8457 | i945_get_display_clock_speed; |
8160 | else if (IS_I915G(dev)) | 8458 | else if (IS_I915G(dev)) |
@@ -8193,6 +8491,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8193 | } | 8491 | } |
8194 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; | 8492 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
8195 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | 8493 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
8494 | dev_priv->display.write_eld = ironlake_write_eld; | ||
8196 | } else if (IS_GEN6(dev)) { | 8495 | } else if (IS_GEN6(dev)) { |
8197 | if (SNB_READ_WM0_LATENCY()) { | 8496 | if (SNB_READ_WM0_LATENCY()) { |
8198 | dev_priv->display.update_wm = sandybridge_update_wm; | 8497 | dev_priv->display.update_wm = sandybridge_update_wm; |
@@ -8203,6 +8502,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8203 | } | 8502 | } |
8204 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; | 8503 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; |
8205 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | 8504 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
8505 | dev_priv->display.write_eld = ironlake_write_eld; | ||
8206 | } else if (IS_IVYBRIDGE(dev)) { | 8506 | } else if (IS_IVYBRIDGE(dev)) { |
8207 | /* FIXME: detect B0+ stepping and use auto training */ | 8507 | /* FIXME: detect B0+ stepping and use auto training */ |
8208 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | 8508 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
@@ -8214,7 +8514,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8214 | dev_priv->display.update_wm = NULL; | 8514 | dev_priv->display.update_wm = NULL; |
8215 | } | 8515 | } |
8216 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | 8516 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
8217 | 8517 | dev_priv->display.write_eld = ironlake_write_eld; | |
8218 | } else | 8518 | } else |
8219 | dev_priv->display.update_wm = NULL; | 8519 | dev_priv->display.update_wm = NULL; |
8220 | } else if (IS_PINEVIEW(dev)) { | 8520 | } else if (IS_PINEVIEW(dev)) { |
@@ -8225,7 +8525,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8225 | DRM_INFO("failed to find known CxSR latency " | 8525 | DRM_INFO("failed to find known CxSR latency " |
8226 | "(found ddr%s fsb freq %d, mem freq %d), " | 8526 | "(found ddr%s fsb freq %d, mem freq %d), " |
8227 | "disabling CxSR\n", | 8527 | "disabling CxSR\n", |
8228 | (dev_priv->is_ddr3 == 1) ? "3": "2", | 8528 | (dev_priv->is_ddr3 == 1) ? "3" : "2", |
8229 | dev_priv->fsb_freq, dev_priv->mem_freq); | 8529 | dev_priv->fsb_freq, dev_priv->mem_freq); |
8230 | /* Disable CxSR and never update its watermark again */ | 8530 | /* Disable CxSR and never update its watermark again */ |
8231 | pineview_disable_cxsr(dev); | 8531 | pineview_disable_cxsr(dev); |
@@ -8234,6 +8534,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8234 | dev_priv->display.update_wm = pineview_update_wm; | 8534 | dev_priv->display.update_wm = pineview_update_wm; |
8235 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | 8535 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
8236 | } else if (IS_G4X(dev)) { | 8536 | } else if (IS_G4X(dev)) { |
8537 | dev_priv->display.write_eld = g4x_write_eld; | ||
8237 | dev_priv->display.update_wm = g4x_update_wm; | 8538 | dev_priv->display.update_wm = g4x_update_wm; |
8238 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; | 8539 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
8239 | } else if (IS_GEN4(dev)) { | 8540 | } else if (IS_GEN4(dev)) { |
@@ -8294,7 +8595,7 @@ static void intel_init_display(struct drm_device *dev) | |||
8294 | * resume, or other times. This quirk makes sure that's the case for | 8595 | * resume, or other times. This quirk makes sure that's the case for |
8295 | * affected systems. | 8596 | * affected systems. |
8296 | */ | 8597 | */ |
8297 | static void quirk_pipea_force (struct drm_device *dev) | 8598 | static void quirk_pipea_force(struct drm_device *dev) |
8298 | { | 8599 | { |
8299 | struct drm_i915_private *dev_priv = dev->dev_private; | 8600 | struct drm_i915_private *dev_priv = dev->dev_private; |
8300 | 8601 | ||
@@ -8322,7 +8623,7 @@ struct intel_quirk intel_quirks[] = { | |||
8322 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | 8623 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ |
8323 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | 8624 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, |
8324 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | 8625 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
8325 | { 0x27ae,0x103c, 0x361a, quirk_pipea_force }, | 8626 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
8326 | 8627 | ||
8327 | /* Thinkpad R31 needs pipe A force quirk */ | 8628 | /* Thinkpad R31 needs pipe A force quirk */ |
8328 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | 8629 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, |
@@ -8488,6 +8789,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
8488 | * enqueue unpin/hotplug work. */ | 8789 | * enqueue unpin/hotplug work. */ |
8489 | drm_irq_uninstall(dev); | 8790 | drm_irq_uninstall(dev); |
8490 | cancel_work_sync(&dev_priv->hotplug_work); | 8791 | cancel_work_sync(&dev_priv->hotplug_work); |
8792 | cancel_work_sync(&dev_priv->rps_work); | ||
8491 | 8793 | ||
8492 | /* flush any delayed tasks or pending work */ | 8794 | /* flush any delayed tasks or pending work */ |
8493 | flush_scheduled_work(); | 8795 | flush_scheduled_work(); |
@@ -8573,7 +8875,7 @@ struct intel_display_error_state { | |||
8573 | struct intel_display_error_state * | 8875 | struct intel_display_error_state * |
8574 | intel_display_capture_error_state(struct drm_device *dev) | 8876 | intel_display_capture_error_state(struct drm_device *dev) |
8575 | { | 8877 | { |
8576 | drm_i915_private_t *dev_priv = dev->dev_private; | 8878 | drm_i915_private_t *dev_priv = dev->dev_private; |
8577 | struct intel_display_error_state *error; | 8879 | struct intel_display_error_state *error; |
8578 | int i; | 8880 | int i; |
8579 | 8881 | ||
@@ -8589,7 +8891,7 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
8589 | error->plane[i].control = I915_READ(DSPCNTR(i)); | 8891 | error->plane[i].control = I915_READ(DSPCNTR(i)); |
8590 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); | 8892 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); |
8591 | error->plane[i].size = I915_READ(DSPSIZE(i)); | 8893 | error->plane[i].size = I915_READ(DSPSIZE(i)); |
8592 | error->plane[i].pos= I915_READ(DSPPOS(i)); | 8894 | error->plane[i].pos = I915_READ(DSPPOS(i)); |
8593 | error->plane[i].addr = I915_READ(DSPADDR(i)); | 8895 | error->plane[i].addr = I915_READ(DSPADDR(i)); |
8594 | if (INTEL_INFO(dev)->gen >= 4) { | 8896 | if (INTEL_INFO(dev)->gen >= 4) { |
8595 | error->plane[i].surface = I915_READ(DSPSURF(i)); | 8897 | error->plane[i].surface = I915_READ(DSPSURF(i)); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 44fef5e1c490..fc1a0832af4f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
37 | #include "drm_dp_helper.h" | 37 | #include "drm_dp_helper.h" |
38 | 38 | ||
39 | 39 | #define DP_RECEIVER_CAP_SIZE 0xf | |
40 | #define DP_LINK_STATUS_SIZE 6 | 40 | #define DP_LINK_STATUS_SIZE 6 |
41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
42 | 42 | ||
@@ -53,12 +53,21 @@ struct intel_dp { | |||
53 | int dpms_mode; | 53 | int dpms_mode; |
54 | uint8_t link_bw; | 54 | uint8_t link_bw; |
55 | uint8_t lane_count; | 55 | uint8_t lane_count; |
56 | uint8_t dpcd[8]; | 56 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
57 | struct i2c_adapter adapter; | 57 | struct i2c_adapter adapter; |
58 | struct i2c_algo_dp_aux_data algo; | 58 | struct i2c_algo_dp_aux_data algo; |
59 | bool is_pch_edp; | 59 | bool is_pch_edp; |
60 | uint8_t train_set[4]; | 60 | uint8_t train_set[4]; |
61 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 61 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
62 | int panel_power_up_delay; | ||
63 | int panel_power_down_delay; | ||
64 | int panel_power_cycle_delay; | ||
65 | int backlight_on_delay; | ||
66 | int backlight_off_delay; | ||
67 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | ||
68 | struct delayed_work panel_vdd_work; | ||
69 | bool want_panel_vdd; | ||
70 | unsigned long panel_off_jiffies; | ||
62 | }; | 71 | }; |
63 | 72 | ||
64 | /** | 73 | /** |
@@ -86,6 +95,17 @@ static bool is_pch_edp(struct intel_dp *intel_dp) | |||
86 | return intel_dp->is_pch_edp; | 95 | return intel_dp->is_pch_edp; |
87 | } | 96 | } |
88 | 97 | ||
98 | /** | ||
99 | * is_cpu_edp - is the port on the CPU and attached to an eDP panel? | ||
100 | * @intel_dp: DP struct | ||
101 | * | ||
102 | * Returns true if the given DP struct corresponds to a CPU eDP port. | ||
103 | */ | ||
104 | static bool is_cpu_edp(struct intel_dp *intel_dp) | ||
105 | { | ||
106 | return is_edp(intel_dp) && !is_pch_edp(intel_dp); | ||
107 | } | ||
108 | |||
89 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 109 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
90 | { | 110 | { |
91 | return container_of(encoder, struct intel_dp, base.base); | 111 | return container_of(encoder, struct intel_dp, base.base); |
@@ -121,7 +141,7 @@ static void intel_dp_complete_link_train(struct intel_dp *intel_dp); | |||
121 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 141 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
122 | 142 | ||
123 | void | 143 | void |
124 | intel_edp_link_config (struct intel_encoder *intel_encoder, | 144 | intel_edp_link_config(struct intel_encoder *intel_encoder, |
125 | int *lane_num, int *link_bw) | 145 | int *lane_num, int *link_bw) |
126 | { | 146 | { |
127 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); | 147 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
@@ -175,9 +195,25 @@ intel_dp_link_clock(uint8_t link_bw) | |||
175 | return 162000; | 195 | return 162000; |
176 | } | 196 | } |
177 | 197 | ||
178 | /* I think this is a fiction */ | 198 | /* |
199 | * The units on the numbers in the next two are... bizarre. Examples will | ||
200 | * make it clearer; this one parallels an example in the eDP spec. | ||
201 | * | ||
202 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: | ||
203 | * | ||
204 | * 270000 * 1 * 8 / 10 == 216000 | ||
205 | * | ||
206 | * The actual data capacity of that configuration is 2.16Gbit/s, so the | ||
207 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - | ||
208 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be | ||
209 | * 119000. At 18bpp that's 2142000 kilobits per second. | ||
210 | * | ||
211 | * Thus the strange-looking division by 10 in intel_dp_link_required, to | ||
212 | * get the result in decakilobits instead of kilobits. | ||
213 | */ | ||
214 | |||
179 | static int | 215 | static int |
180 | intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) | 216 | intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock) |
181 | { | 217 | { |
182 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 218 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
183 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 219 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -186,7 +222,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) | |||
186 | if (intel_crtc) | 222 | if (intel_crtc) |
187 | bpp = intel_crtc->bpp; | 223 | bpp = intel_crtc->bpp; |
188 | 224 | ||
189 | return (pixel_clock * bpp + 7) / 8; | 225 | return (pixel_clock * bpp + 9) / 10; |
190 | } | 226 | } |
191 | 227 | ||
192 | static int | 228 | static int |
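The unusual units described in the new comment are easy to verify with ordinary arithmetic. Below is a minimal, self-contained C sketch (not driver code) that mirrors intel_dp_link_required() and intel_dp_max_data_rate() and reproduces both worked examples: one lane at 2.7 GHz carries 216000 decakilobits, while 1680x1050R at 18 bpp needs 214200, so the mode just fits.

#include <stdio.h>

/* Mirrors intel_dp_max_data_rate(): link clock in kHz, 8b/10b coding,
 * result in decakilobits (tens of kbit/s). */
static int max_data_rate(int max_link_clock, int max_lanes)
{
        return (max_link_clock * max_lanes * 8) / 10;
}

/* Mirrors intel_dp_link_required(): pixel clock in kHz, bpp bits per
 * pixel, ceiling-divided into the same decakilobit units. */
static int link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}

int main(void)
{
        /* one lane at 2.7 GHz: 270000 * 1 * 8 / 10 == 216000 */
        printf("capacity: %d\n", max_data_rate(270000, 1));
        /* 1680x1050R: ~119000 kHz pixel clock at 18 bpp -> 214200 */
        printf("required: %d\n", link_required(119000, 18));
        return 0;
}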
@@ -200,24 +236,19 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
200 | struct drm_display_mode *mode) | 236 | struct drm_display_mode *mode) |
201 | { | 237 | { |
202 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 238 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
203 | struct drm_device *dev = connector->dev; | ||
204 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
205 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | 239 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
206 | int max_lanes = intel_dp_max_lane_count(intel_dp); | 240 | int max_lanes = intel_dp_max_lane_count(intel_dp); |
207 | 241 | ||
208 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { | 242 | if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { |
209 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | 243 | if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) |
210 | return MODE_PANEL; | 244 | return MODE_PANEL; |
211 | 245 | ||
212 | if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) | 246 | if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) |
213 | return MODE_PANEL; | 247 | return MODE_PANEL; |
214 | } | 248 | } |
215 | 249 | ||
216 | /* only refuse the mode on non eDP since we have seen some weird eDP panels | 250 | if (intel_dp_link_required(intel_dp, mode->clock) |
217 | which are outside spec tolerances but somehow work by magic */ | 251 | > intel_dp_max_data_rate(max_link_clock, max_lanes)) |
218 | if (!is_edp(intel_dp) && | ||
219 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) | ||
220 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | ||
221 | return MODE_CLOCK_HIGH; | 252 | return MODE_CLOCK_HIGH; |
222 | 253 | ||
223 | if (mode->clock < 10000) | 254 | if (mode->clock < 10000) |
@@ -279,6 +310,38 @@ intel_hrawclk(struct drm_device *dev) | |||
279 | } | 310 | } |
280 | } | 311 | } |
281 | 312 | ||
313 | static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) | ||
314 | { | ||
315 | struct drm_device *dev = intel_dp->base.base.dev; | ||
316 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
317 | |||
318 | return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; | ||
319 | } | ||
320 | |||
321 | static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) | ||
322 | { | ||
323 | struct drm_device *dev = intel_dp->base.base.dev; | ||
324 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
325 | |||
326 | return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; | ||
327 | } | ||
328 | |||
329 | static void | ||
330 | intel_dp_check_edp(struct intel_dp *intel_dp) | ||
331 | { | ||
332 | struct drm_device *dev = intel_dp->base.base.dev; | ||
333 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
334 | |||
335 | if (!is_edp(intel_dp)) | ||
336 | return; | ||
337 | if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { | ||
338 | WARN(1, "eDP powered off while attempting aux channel communication.\n"); | ||
339 | DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", | ||
340 | I915_READ(PCH_PP_STATUS), | ||
341 | I915_READ(PCH_PP_CONTROL)); | ||
342 | } | ||
343 | } | ||
344 | |||
282 | static int | 345 | static int |
283 | intel_dp_aux_ch(struct intel_dp *intel_dp, | 346 | intel_dp_aux_ch(struct intel_dp *intel_dp, |
284 | uint8_t *send, int send_bytes, | 347 | uint8_t *send, int send_bytes, |
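intel_dp_check_edp() is a tripwire: every AUX entry point now verifies that either panel power or forced VDD is up before touching the channel, since an unpowered eDP panel fails AUX transfers silently. The assert-before-I/O shape, sketched standalone with hypothetical stand-in functions:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two hardware status reads. */
static bool have_panel_power(void) { return false; }
static bool have_forced_vdd(void)  { return true; }

/* Scream before the transfer; a dead AUX channel is hard to diagnose
 * after the fact. */
static void check_edp_powered(void)
{
        assert(have_panel_power() || have_forced_vdd());
}

int main(void)
{
        check_edp_powered();
        puts("safe to run the AUX transaction");
        return 0;
}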
@@ -295,6 +358,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
295 | uint32_t aux_clock_divider; | 358 | uint32_t aux_clock_divider; |
296 | int try, precharge; | 359 | int try, precharge; |
297 | 360 | ||
361 | intel_dp_check_edp(intel_dp); | ||
298 | /* The clock divider is based off the hrawclk, | 362 | /* The clock divider is based off the hrawclk, |
299 | * and would like to run at 2MHz. So, take the | 363 | * and would like to run at 2MHz. So, take the |
300 | * hrawclk value and divide by 2 and use that | 364 | * hrawclk value and divide by 2 and use that |
@@ -302,7 +366,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
302 | * Note that PCH attached eDP panels should use a 125MHz input | 366 | * Note that PCH attached eDP panels should use a 125MHz input |
303 | * clock divider. | 367 | * clock divider. |
304 | */ | 368 | */ |
305 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { | 369 | if (is_cpu_edp(intel_dp)) { |
306 | if (IS_GEN6(dev)) | 370 | if (IS_GEN6(dev)) |
307 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ | 371 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ |
308 | else | 372 | else |
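The divider selection in this hunk follows one rule: the AUX channel wants to tick near 2 MHz, so the divider is simply the input clock in MHz halved. A throwaway C check of the figures quoted in the code and comment (400 MHz SNB CPU eDP input, 125 MHz PCH eDP input):

#include <stdio.h>

/* Divider that brings an input clock (MHz) down to roughly 2 MHz. */
static unsigned int aux_divider(unsigned int input_clock_mhz)
{
        return input_clock_mhz / 2;
}

int main(void)
{
        printf("SNB CPU eDP (400 MHz): %u\n", aux_divider(400)); /* 200 */
        printf("PCH eDP     (125 MHz): %u\n", aux_divider(125)); /* 62 */
        return 0;
}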
@@ -337,7 +401,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
337 | for (i = 0; i < send_bytes; i += 4) | 401 | for (i = 0; i < send_bytes; i += 4) |
338 | I915_WRITE(ch_data + i, | 402 | I915_WRITE(ch_data + i, |
339 | pack_aux(send + i, send_bytes - i)); | 403 | pack_aux(send + i, send_bytes - i)); |
340 | 404 | ||
341 | /* Send the command and wait for it to complete */ | 405 | /* Send the command and wait for it to complete */ |
342 | I915_WRITE(ch_ctl, | 406 | I915_WRITE(ch_ctl, |
343 | DP_AUX_CH_CTL_SEND_BUSY | | 407 | DP_AUX_CH_CTL_SEND_BUSY | |
@@ -354,7 +418,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
354 | break; | 418 | break; |
355 | udelay(100); | 419 | udelay(100); |
356 | } | 420 | } |
357 | 421 | ||
358 | /* Clear done status and any errors */ | 422 | /* Clear done status and any errors */ |
359 | I915_WRITE(ch_ctl, | 423 | I915_WRITE(ch_ctl, |
360 | status | | 424 | status | |
@@ -390,7 +454,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
390 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); | 454 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
391 | if (recv_bytes > recv_size) | 455 | if (recv_bytes > recv_size) |
392 | recv_bytes = recv_size; | 456 | recv_bytes = recv_size; |
393 | 457 | ||
394 | for (i = 0; i < recv_bytes; i += 4) | 458 | for (i = 0; i < recv_bytes; i += 4) |
395 | unpack_aux(I915_READ(ch_data + i), | 459 | unpack_aux(I915_READ(ch_data + i), |
396 | recv + i, recv_bytes - i); | 460 | recv + i, recv_bytes - i); |
@@ -408,6 +472,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, | |||
408 | int msg_bytes; | 472 | int msg_bytes; |
409 | uint8_t ack; | 473 | uint8_t ack; |
410 | 474 | ||
475 | intel_dp_check_edp(intel_dp); | ||
411 | if (send_bytes > 16) | 476 | if (send_bytes > 16) |
412 | return -1; | 477 | return -1; |
413 | msg[0] = AUX_NATIVE_WRITE << 4; | 478 | msg[0] = AUX_NATIVE_WRITE << 4; |
@@ -450,6 +515,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
450 | uint8_t ack; | 515 | uint8_t ack; |
451 | int ret; | 516 | int ret; |
452 | 517 | ||
518 | intel_dp_check_edp(intel_dp); | ||
453 | msg[0] = AUX_NATIVE_READ << 4; | 519 | msg[0] = AUX_NATIVE_READ << 4; |
454 | msg[1] = address >> 8; | 520 | msg[1] = address >> 8; |
455 | msg[2] = address & 0xff; | 521 | msg[2] = address & 0xff; |
@@ -493,6 +559,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
493 | int reply_bytes; | 559 | int reply_bytes; |
494 | int ret; | 560 | int ret; |
495 | 561 | ||
562 | intel_dp_check_edp(intel_dp); | ||
496 | /* Set up the command byte */ | 563 | /* Set up the command byte */ |
497 | if (mode & MODE_I2C_READ) | 564 | if (mode & MODE_I2C_READ) |
498 | msg[0] = AUX_I2C_READ << 4; | 565 | msg[0] = AUX_I2C_READ << 4; |
@@ -573,24 +640,32 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
573 | return -EREMOTEIO; | 640 | return -EREMOTEIO; |
574 | } | 641 | } |
575 | 642 | ||
643 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); | ||
644 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | ||
645 | |||
576 | static int | 646 | static int |
577 | intel_dp_i2c_init(struct intel_dp *intel_dp, | 647 | intel_dp_i2c_init(struct intel_dp *intel_dp, |
578 | struct intel_connector *intel_connector, const char *name) | 648 | struct intel_connector *intel_connector, const char *name) |
579 | { | 649 | { |
650 | int ret; | ||
651 | |||
580 | DRM_DEBUG_KMS("i2c_init %s\n", name); | 652 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
581 | intel_dp->algo.running = false; | 653 | intel_dp->algo.running = false; |
582 | intel_dp->algo.address = 0; | 654 | intel_dp->algo.address = 0; |
583 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; | 655 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; |
584 | 656 | ||
585 | memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); | 657 | memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); |
586 | intel_dp->adapter.owner = THIS_MODULE; | 658 | intel_dp->adapter.owner = THIS_MODULE; |
587 | intel_dp->adapter.class = I2C_CLASS_DDC; | 659 | intel_dp->adapter.class = I2C_CLASS_DDC; |
588 | strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); | 660 | strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); |
589 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; | 661 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; |
590 | intel_dp->adapter.algo_data = &intel_dp->algo; | 662 | intel_dp->adapter.algo_data = &intel_dp->algo; |
591 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; | 663 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; |
592 | 664 | ||
593 | return i2c_dp_aux_add_bus(&intel_dp->adapter); | 665 | ironlake_edp_panel_vdd_on(intel_dp); |
666 | ret = i2c_dp_aux_add_bus(&intel_dp->adapter); | ||
667 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
668 | return ret; | ||
594 | } | 669 | } |
595 | 670 | ||
596 | static bool | 671 | static bool |
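Registering the i2c-over-AUX adapter can itself generate AUX traffic, so it is now bracketed by VDD on/off, the same bracket later applied to EDID reads and DPCD caching. The idiom in isolation (standalone sketch with stub functions, not the driver's):

#include <stdio.h>

static void panel_vdd_on(void)  { puts("VDD forced on"); }
static void panel_vdd_off(void) { puts("VDD release queued"); }

/* Anything that may touch the AUX channel runs with the panel powered
 * for its whole duration. */
static int with_panel_vdd(int (*op)(void))
{
        int ret;

        panel_vdd_on();
        ret = op();
        panel_vdd_off();
        return ret;
}

static int register_aux_bus(void) { puts("bus registered"); return 0; }

int main(void) { return with_panel_vdd(register_aux_bus); }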
@@ -598,29 +673,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
598 | struct drm_display_mode *adjusted_mode) | 673 | struct drm_display_mode *adjusted_mode) |
599 | { | 674 | { |
600 | struct drm_device *dev = encoder->dev; | 675 | struct drm_device *dev = encoder->dev; |
601 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
602 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 676 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
603 | int lane_count, clock; | 677 | int lane_count, clock; |
604 | int max_lane_count = intel_dp_max_lane_count(intel_dp); | 678 | int max_lane_count = intel_dp_max_lane_count(intel_dp); |
605 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; | 679 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
606 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 680 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
607 | 681 | ||
608 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { | 682 | if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { |
609 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); | 683 | intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); |
610 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | 684 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, |
611 | mode, adjusted_mode); | 685 | mode, adjusted_mode); |
612 | /* | 686 | /* |
613 | * the mode->clock is used to calculate the Data&Link M/N | 687 | * the mode->clock is used to calculate the Data&Link M/N |
614 | * of the pipe. For the eDP the fixed clock should be used. | 688 | * of the pipe. For the eDP the fixed clock should be used. |
615 | */ | 689 | */ |
616 | mode->clock = dev_priv->panel_fixed_mode->clock; | 690 | mode->clock = intel_dp->panel_fixed_mode->clock; |
617 | } | 691 | } |
618 | 692 | ||
619 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 693 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
620 | for (clock = 0; clock <= max_clock; clock++) { | 694 | for (clock = 0; clock <= max_clock; clock++) { |
621 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 695 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
622 | 696 | ||
623 | if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock) | 697 | if (intel_dp_link_required(intel_dp, mode->clock) |
624 | <= link_avail) { | 698 | <= link_avail) { |
625 | intel_dp->link_bw = bws[clock]; | 699 | intel_dp->link_bw = bws[clock]; |
626 | intel_dp->lane_count = lane_count; | 700 | intel_dp->lane_count = lane_count; |
@@ -634,19 +708,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
634 | } | 708 | } |
635 | } | 709 | } |
636 | 710 | ||
637 | if (is_edp(intel_dp)) { | ||
638 | /* okay we failed just pick the highest */ | ||
639 | intel_dp->lane_count = max_lane_count; | ||
640 | intel_dp->link_bw = bws[max_clock]; | ||
641 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
642 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
643 | "count %d clock %d\n", | ||
644 | intel_dp->link_bw, intel_dp->lane_count, | ||
645 | adjusted_mode->clock); | ||
646 | |||
647 | return true; | ||
648 | } | ||
649 | |||
650 | return false; | 711 | return false; |
651 | } | 712 | } |
652 | 713 | ||
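With the eDP force-pick fallback removed (eDP now has a fixed mode and real DPCD data like any other port), mode_fixup is a straightforward search: walk lane counts 1, 2, 4 and the two link rates, and accept the first, cheapest configuration whose capacity covers the mode. A self-contained sketch of that search, reusing the decakilobit units from earlier:

#include <stdio.h>

static int max_data_rate(int link_clock, int lanes)
{
        return (link_clock * lanes * 8) / 10;
}

int main(void)
{
        static const int clocks[] = { 162000, 270000 };  /* kHz */
        int required = 214200;  /* 1680x1050R at 18 bpp, decakilobits */

        for (int lanes = 1; lanes <= 4; lanes <<= 1) {
                for (int c = 0; c < 2; c++) {
                        if (required <= max_data_rate(clocks[c], lanes)) {
                                printf("picked %d lane(s) at %d kHz\n",
                                       lanes, clocks[c]);
                                return 0;
                        }
                }
        }
        puts("mode does not fit any link configuration");
        return 1;
}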
@@ -740,6 +801,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
740 | } | 801 | } |
741 | } | 802 | } |
742 | 803 | ||
804 | static void ironlake_edp_pll_on(struct drm_encoder *encoder); | ||
805 | static void ironlake_edp_pll_off(struct drm_encoder *encoder); | ||
806 | |||
743 | static void | 807 | static void |
744 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 808 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
745 | struct drm_display_mode *adjusted_mode) | 809 | struct drm_display_mode *adjusted_mode) |
@@ -749,6 +813,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
749 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 813 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
750 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 814 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
751 | 815 | ||
816 | /* Turn on the eDP PLL if needed */ | ||
817 | if (is_edp(intel_dp)) { | ||
818 | if (!is_pch_edp(intel_dp)) | ||
819 | ironlake_edp_pll_on(encoder); | ||
820 | else | ||
821 | ironlake_edp_pll_off(encoder); | ||
822 | } | ||
823 | |||
752 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | 824 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
753 | intel_dp->DP |= intel_dp->color_range; | 825 | intel_dp->DP |= intel_dp->color_range; |
754 | 826 | ||
@@ -757,7 +829,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
757 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 829 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
758 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 830 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
759 | 831 | ||
760 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 832 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
761 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 833 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
762 | else | 834 | else |
763 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | 835 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
@@ -773,8 +845,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
773 | intel_dp->DP |= DP_PORT_WIDTH_4; | 845 | intel_dp->DP |= DP_PORT_WIDTH_4; |
774 | break; | 846 | break; |
775 | } | 847 | } |
776 | if (intel_dp->has_audio) | 848 | if (intel_dp->has_audio) { |
849 | DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", | ||
850 | pipe_name(intel_crtc->pipe)); | ||
777 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; | 851 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
852 | intel_write_eld(encoder, adjusted_mode); | ||
853 | } | ||
778 | 854 | ||
779 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 855 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
780 | intel_dp->link_configuration[0] = intel_dp->link_bw; | 856 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
@@ -794,7 +870,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
794 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | 870 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) |
795 | intel_dp->DP |= DP_PIPEB_SELECT; | 871 | intel_dp->DP |= DP_PIPEB_SELECT; |
796 | 872 | ||
797 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { | 873 | if (is_cpu_edp(intel_dp)) { |
798 | /* don't miss out required setting for eDP */ | 874 | /* don't miss out required setting for eDP */ |
799 | intel_dp->DP |= DP_PLL_ENABLE; | 875 | intel_dp->DP |= DP_PLL_ENABLE; |
800 | if (adjusted_mode->clock < 200000) | 876 | if (adjusted_mode->clock < 200000) |
@@ -804,58 +880,150 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
804 | } | 880 | } |
805 | } | 881 | } |
806 | 882 | ||
883 | static void ironlake_wait_panel_off(struct intel_dp *intel_dp) | ||
884 | { | ||
885 | unsigned long off_time; | ||
886 | unsigned long delay; | ||
887 | |||
888 | DRM_DEBUG_KMS("Wait for panel power off time\n"); | ||
889 | |||
890 | if (ironlake_edp_have_panel_power(intel_dp) || | ||
891 | ironlake_edp_have_panel_vdd(intel_dp)) | ||
892 | { | ||
893 | DRM_DEBUG_KMS("Panel still on, no delay needed\n"); | ||
894 | return; | ||
895 | } | ||
896 | |||
897 | off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); | ||
898 | if (time_after(jiffies, off_time)) { | ||
899 | DRM_DEBUG_KMS("Time already passed"); | ||
900 | return; | ||
901 | } | ||
902 | delay = jiffies_to_msecs(off_time - jiffies); | ||
903 | if (delay > intel_dp->panel_power_down_delay) | ||
904 | delay = intel_dp->panel_power_down_delay; | ||
905 | DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); | ||
906 | msleep(delay); | ||
907 | } | ||
908 | |||
807 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | 909 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
808 | { | 910 | { |
809 | struct drm_device *dev = intel_dp->base.base.dev; | 911 | struct drm_device *dev = intel_dp->base.base.dev; |
810 | struct drm_i915_private *dev_priv = dev->dev_private; | 912 | struct drm_i915_private *dev_priv = dev->dev_private; |
811 | u32 pp; | 913 | u32 pp; |
812 | 914 | ||
813 | /* | 915 | if (!is_edp(intel_dp)) |
814 | * If the panel wasn't on, make sure there's not a currently | 916 | return; |
815 | * active PP sequence before enabling AUX VDD. | 917 | DRM_DEBUG_KMS("Turn eDP VDD on\n"); |
816 | */ | 918 | |
817 | if (!(I915_READ(PCH_PP_STATUS) & PP_ON)) | 919 | WARN(intel_dp->want_panel_vdd, |
818 | msleep(dev_priv->panel_t3); | 920 | "eDP VDD already requested on\n"); |
921 | |||
922 | intel_dp->want_panel_vdd = true; | ||
923 | if (ironlake_edp_have_panel_vdd(intel_dp)) { | ||
924 | DRM_DEBUG_KMS("eDP VDD already on\n"); | ||
925 | return; | ||
926 | } | ||
819 | 927 | ||
928 | ironlake_wait_panel_off(intel_dp); | ||
820 | pp = I915_READ(PCH_PP_CONTROL); | 929 | pp = I915_READ(PCH_PP_CONTROL); |
930 | pp &= ~PANEL_UNLOCK_MASK; | ||
931 | pp |= PANEL_UNLOCK_REGS; | ||
821 | pp |= EDP_FORCE_VDD; | 932 | pp |= EDP_FORCE_VDD; |
822 | I915_WRITE(PCH_PP_CONTROL, pp); | 933 | I915_WRITE(PCH_PP_CONTROL, pp); |
823 | POSTING_READ(PCH_PP_CONTROL); | 934 | POSTING_READ(PCH_PP_CONTROL); |
935 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", | ||
936 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); | ||
937 | |||
938 | /* | ||
939 | * If the panel wasn't on, delay before accessing aux channel | ||
940 | */ | ||
941 | if (!ironlake_edp_have_panel_power(intel_dp)) { | ||
942 | DRM_DEBUG_KMS("eDP was not running\n"); | ||
943 | msleep(intel_dp->panel_power_up_delay); | ||
944 | } | ||
824 | } | 945 | } |
825 | 946 | ||
826 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp) | 947 | static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) |
827 | { | 948 | { |
828 | struct drm_device *dev = intel_dp->base.base.dev; | 949 | struct drm_device *dev = intel_dp->base.base.dev; |
829 | struct drm_i915_private *dev_priv = dev->dev_private; | 950 | struct drm_i915_private *dev_priv = dev->dev_private; |
830 | u32 pp; | 951 | u32 pp; |
831 | 952 | ||
832 | pp = I915_READ(PCH_PP_CONTROL); | 953 | if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { |
833 | pp &= ~EDP_FORCE_VDD; | 954 | pp = I915_READ(PCH_PP_CONTROL); |
834 | I915_WRITE(PCH_PP_CONTROL, pp); | 955 | pp &= ~PANEL_UNLOCK_MASK; |
835 | POSTING_READ(PCH_PP_CONTROL); | 956 | pp |= PANEL_UNLOCK_REGS; |
957 | pp &= ~EDP_FORCE_VDD; | ||
958 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
959 | POSTING_READ(PCH_PP_CONTROL); | ||
960 | |||
961 | /* Make sure sequencer is idle before allowing subsequent activity */ | ||
962 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", | ||
963 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); | ||
964 | intel_dp->panel_off_jiffies = jiffies; | ||
965 | } | ||
966 | } | ||
967 | |||
968 | static void ironlake_panel_vdd_work(struct work_struct *__work) | ||
969 | { | ||
970 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), | ||
971 | struct intel_dp, panel_vdd_work); | ||
972 | struct drm_device *dev = intel_dp->base.base.dev; | ||
836 | 973 | ||
837 | /* Make sure sequencer is idle before allowing subsequent activity */ | 974 | mutex_lock(&dev->struct_mutex); |
838 | msleep(dev_priv->panel_t12); | 975 | ironlake_panel_vdd_off_sync(intel_dp); |
976 | mutex_unlock(&dev->struct_mutex); | ||
977 | } | ||
978 | |||
979 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | ||
980 | { | ||
981 | if (!is_edp(intel_dp)) | ||
982 | return; | ||
983 | |||
984 | DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); | ||
985 | WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); | ||
986 | |||
987 | intel_dp->want_panel_vdd = false; | ||
988 | |||
989 | if (sync) { | ||
990 | ironlake_panel_vdd_off_sync(intel_dp); | ||
991 | } else { | ||
992 | /* | ||
993 | * Queue the timer to fire a long | ||
994 | * time from now (relative to the power down delay) | ||
995 | * to keep the panel power up across a sequence of operations | ||
996 | */ | ||
997 | schedule_delayed_work(&intel_dp->panel_vdd_work, | ||
998 | msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); | ||
999 | } | ||
839 | } | 1000 | } |
840 | 1001 | ||
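The asynchronous vdd_off path above is the subtle part: rather than dropping VDD immediately and paying a full panel power cycle before the next AUX access, it schedules the real power-down far in the future (five power-cycle delays), so a burst of operations keeps reusing the powered-up panel. A userspace sketch of that debounce, with a fake monotonic counter standing in for the work queue:

#include <stdbool.h>
#include <stdio.h>

/* Debounce sketch: "off" is only honored once no new "on" arrived
 * within the hold time. Time is faked with a counter. */
static long now;
static long vdd_deadline = -1;
static bool vdd_forced;

static void vdd_on(void)       { vdd_forced = true; vdd_deadline = -1; }
static void vdd_off(long hold) { vdd_forced = false; vdd_deadline = now + hold; }

static void tick(void)
{
        now++;
        if (!vdd_forced && vdd_deadline >= 0 && now >= vdd_deadline) {
                puts("VDD actually dropped");
                vdd_deadline = -1;
        }
}

int main(void)
{
        vdd_on();  vdd_off(5);          /* first AUX operation */
        tick(); tick();
        vdd_on();  vdd_off(5);          /* second one pushes the deadline */
        for (int i = 0; i < 6; i++)
                tick();                 /* only now does VDD really go */
        return 0;
}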
841 | /* Returns true if the panel was already on when called */ | 1002 | /* Returns true if the panel was already on when called */ |
842 | static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) | 1003 | static void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
843 | { | 1004 | { |
844 | struct drm_device *dev = intel_dp->base.base.dev; | 1005 | struct drm_device *dev = intel_dp->base.base.dev; |
845 | struct drm_i915_private *dev_priv = dev->dev_private; | 1006 | struct drm_i915_private *dev_priv = dev->dev_private; |
846 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; | 1007 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; |
847 | 1008 | ||
848 | if (I915_READ(PCH_PP_STATUS) & PP_ON) | 1009 | if (!is_edp(intel_dp)) |
849 | return true; | 1010 | return; |
1011 | if (ironlake_edp_have_panel_power(intel_dp)) | ||
1012 | return; | ||
850 | 1013 | ||
1014 | ironlake_wait_panel_off(intel_dp); | ||
851 | pp = I915_READ(PCH_PP_CONTROL); | 1015 | pp = I915_READ(PCH_PP_CONTROL); |
1016 | pp &= ~PANEL_UNLOCK_MASK; | ||
1017 | pp |= PANEL_UNLOCK_REGS; | ||
1018 | |||
1019 | if (IS_GEN5(dev)) { | ||
1020 | /* ILK workaround: disable reset around power sequence */ | ||
1021 | pp &= ~PANEL_POWER_RESET; | ||
1022 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1023 | POSTING_READ(PCH_PP_CONTROL); | ||
1024 | } | ||
852 | 1025 | ||
853 | /* ILK workaround: disable reset around power sequence */ | 1026 | pp |= POWER_TARGET_ON; |
854 | pp &= ~PANEL_POWER_RESET; | ||
855 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
856 | POSTING_READ(PCH_PP_CONTROL); | ||
857 | |||
858 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | ||
859 | I915_WRITE(PCH_PP_CONTROL, pp); | 1027 | I915_WRITE(PCH_PP_CONTROL, pp); |
860 | POSTING_READ(PCH_PP_CONTROL); | 1028 | POSTING_READ(PCH_PP_CONTROL); |
861 | 1029 | ||
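ironlake_wait_panel_off() at the top of this hunk sleeps only for the portion of the power-down delay that has not yet elapsed since the panel went off, rather than the full delay every time. The jiffies arithmetic, restated in plain milliseconds as a standalone sketch:

#include <stdio.h>

/* How long (ms) must still pass before the panel may be powered again,
 * given when it was switched off; sleep only the remainder. */
static long remaining_off_delay(long now_ms, long off_ms, long power_down_ms)
{
        long off_time = off_ms + power_down_ms;

        if (now_ms >= off_time)
                return 0;               /* delay already served */
        return off_time - now_ms;
}

int main(void)
{
        /* off at t=1000 ms, 600 ms power-down delay, now t=1250 ms */
        printf("%ld ms left\n", remaining_off_delay(1250, 1000, 600)); /* 350 */
        return 0;
}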
@@ -864,44 +1032,64 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) | |||
864 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | 1032 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
865 | I915_READ(PCH_PP_STATUS)); | 1033 | I915_READ(PCH_PP_STATUS)); |
866 | 1034 | ||
867 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 1035 | if (IS_GEN5(dev)) { |
868 | I915_WRITE(PCH_PP_CONTROL, pp); | 1036 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
869 | POSTING_READ(PCH_PP_CONTROL); | 1037 | I915_WRITE(PCH_PP_CONTROL, pp); |
870 | 1038 | POSTING_READ(PCH_PP_CONTROL); | |
871 | return false; | 1039 | } |
872 | } | 1040 | } |
873 | 1041 | ||
874 | static void ironlake_edp_panel_off (struct drm_device *dev) | 1042 | static void ironlake_edp_panel_off(struct drm_encoder *encoder) |
875 | { | 1043 | { |
1044 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1045 | struct drm_device *dev = encoder->dev; | ||
876 | struct drm_i915_private *dev_priv = dev->dev_private; | 1046 | struct drm_i915_private *dev_priv = dev->dev_private; |
877 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | | 1047 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | |
878 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; | 1048 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; |
879 | 1049 | ||
1050 | if (!is_edp(intel_dp)) | ||
1051 | return; | ||
880 | pp = I915_READ(PCH_PP_CONTROL); | 1052 | pp = I915_READ(PCH_PP_CONTROL); |
1053 | pp &= ~PANEL_UNLOCK_MASK; | ||
1054 | pp |= PANEL_UNLOCK_REGS; | ||
1055 | |||
1056 | if (IS_GEN5(dev)) { | ||
1057 | /* ILK workaround: disable reset around power sequence */ | ||
1058 | pp &= ~PANEL_POWER_RESET; | ||
1059 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1060 | POSTING_READ(PCH_PP_CONTROL); | ||
1061 | } | ||
881 | 1062 | ||
882 | /* ILK workaround: disable reset around power sequence */ | 1063 | intel_dp->panel_off_jiffies = jiffies; |
883 | pp &= ~PANEL_POWER_RESET; | ||
884 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
885 | POSTING_READ(PCH_PP_CONTROL); | ||
886 | 1064 | ||
887 | pp &= ~POWER_TARGET_ON; | 1065 | if (IS_GEN5(dev)) { |
888 | I915_WRITE(PCH_PP_CONTROL, pp); | 1066 | pp &= ~POWER_TARGET_ON; |
889 | POSTING_READ(PCH_PP_CONTROL); | 1067 | I915_WRITE(PCH_PP_CONTROL, pp); |
1068 | POSTING_READ(PCH_PP_CONTROL); | ||
1069 | pp &= ~POWER_TARGET_ON; | ||
1070 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1071 | POSTING_READ(PCH_PP_CONTROL); | ||
1072 | msleep(intel_dp->panel_power_cycle_delay); | ||
890 | 1073 | ||
891 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) | 1074 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) |
892 | DRM_ERROR("panel off wait timed out: 0x%08x\n", | 1075 | DRM_ERROR("panel off wait timed out: 0x%08x\n", |
893 | I915_READ(PCH_PP_STATUS)); | 1076 | I915_READ(PCH_PP_STATUS)); |
894 | 1077 | ||
895 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 1078 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
896 | I915_WRITE(PCH_PP_CONTROL, pp); | 1079 | I915_WRITE(PCH_PP_CONTROL, pp); |
897 | POSTING_READ(PCH_PP_CONTROL); | 1080 | POSTING_READ(PCH_PP_CONTROL); |
1081 | } | ||
898 | } | 1082 | } |
899 | 1083 | ||
900 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 1084 | static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
901 | { | 1085 | { |
1086 | struct drm_device *dev = intel_dp->base.base.dev; | ||
902 | struct drm_i915_private *dev_priv = dev->dev_private; | 1087 | struct drm_i915_private *dev_priv = dev->dev_private; |
903 | u32 pp; | 1088 | u32 pp; |
904 | 1089 | ||
1090 | if (!is_edp(intel_dp)) | ||
1091 | return; | ||
1092 | |||
905 | DRM_DEBUG_KMS("\n"); | 1093 | DRM_DEBUG_KMS("\n"); |
906 | /* | 1094 | /* |
907 | * If we enable the backlight right away following a panel power | 1095 | * If we enable the backlight right away following a panel power |
@@ -909,21 +1097,32 @@ static void ironlake_edp_backlight_on (struct drm_device *dev) | |||
909 | * link. So delay a bit to make sure the image is solid before | 1097 | * link. So delay a bit to make sure the image is solid before |
910 | * allowing it to appear. | 1098 | * allowing it to appear. |
911 | */ | 1099 | */ |
912 | msleep(300); | 1100 | msleep(intel_dp->backlight_on_delay); |
913 | pp = I915_READ(PCH_PP_CONTROL); | 1101 | pp = I915_READ(PCH_PP_CONTROL); |
1102 | pp &= ~PANEL_UNLOCK_MASK; | ||
1103 | pp |= PANEL_UNLOCK_REGS; | ||
914 | pp |= EDP_BLC_ENABLE; | 1104 | pp |= EDP_BLC_ENABLE; |
915 | I915_WRITE(PCH_PP_CONTROL, pp); | 1105 | I915_WRITE(PCH_PP_CONTROL, pp); |
1106 | POSTING_READ(PCH_PP_CONTROL); | ||
916 | } | 1107 | } |
917 | 1108 | ||
918 | static void ironlake_edp_backlight_off (struct drm_device *dev) | 1109 | static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
919 | { | 1110 | { |
1111 | struct drm_device *dev = intel_dp->base.base.dev; | ||
920 | struct drm_i915_private *dev_priv = dev->dev_private; | 1112 | struct drm_i915_private *dev_priv = dev->dev_private; |
921 | u32 pp; | 1113 | u32 pp; |
922 | 1114 | ||
1115 | if (!is_edp(intel_dp)) | ||
1116 | return; | ||
1117 | |||
923 | DRM_DEBUG_KMS("\n"); | 1118 | DRM_DEBUG_KMS("\n"); |
924 | pp = I915_READ(PCH_PP_CONTROL); | 1119 | pp = I915_READ(PCH_PP_CONTROL); |
1120 | pp &= ~PANEL_UNLOCK_MASK; | ||
1121 | pp |= PANEL_UNLOCK_REGS; | ||
925 | pp &= ~EDP_BLC_ENABLE; | 1122 | pp &= ~EDP_BLC_ENABLE; |
926 | I915_WRITE(PCH_PP_CONTROL, pp); | 1123 | I915_WRITE(PCH_PP_CONTROL, pp); |
1124 | POSTING_READ(PCH_PP_CONTROL); | ||
1125 | msleep(intel_dp->backlight_off_delay); | ||
927 | } | 1126 | } |
928 | 1127 | ||
929 | static void ironlake_edp_pll_on(struct drm_encoder *encoder) | 1128 | static void ironlake_edp_pll_on(struct drm_encoder *encoder) |
@@ -986,43 +1185,39 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
986 | static void intel_dp_prepare(struct drm_encoder *encoder) | 1185 | static void intel_dp_prepare(struct drm_encoder *encoder) |
987 | { | 1186 | { |
988 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1187 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
989 | struct drm_device *dev = encoder->dev; | ||
990 | 1188 | ||
991 | /* Wake up the sink first */ | 1189 | /* Wake up the sink first */ |
1190 | ironlake_edp_panel_vdd_on(intel_dp); | ||
992 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 1191 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
1192 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
993 | 1193 | ||
994 | if (is_edp(intel_dp)) { | 1194 | /* Make sure the panel is off before trying to |
995 | ironlake_edp_backlight_off(dev); | 1195 | * change the mode |
996 | ironlake_edp_panel_off(dev); | 1196 | */ |
997 | if (!is_pch_edp(intel_dp)) | 1197 | ironlake_edp_backlight_off(intel_dp); |
998 | ironlake_edp_pll_on(encoder); | ||
999 | else | ||
1000 | ironlake_edp_pll_off(encoder); | ||
1001 | } | ||
1002 | intel_dp_link_down(intel_dp); | 1198 | intel_dp_link_down(intel_dp); |
1199 | ironlake_edp_panel_off(encoder); | ||
1003 | } | 1200 | } |
1004 | 1201 | ||
1005 | static void intel_dp_commit(struct drm_encoder *encoder) | 1202 | static void intel_dp_commit(struct drm_encoder *encoder) |
1006 | { | 1203 | { |
1007 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1204 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1008 | struct drm_device *dev = encoder->dev; | 1205 | struct drm_device *dev = encoder->dev; |
1206 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); | ||
1009 | 1207 | ||
1010 | if (is_edp(intel_dp)) | 1208 | ironlake_edp_panel_vdd_on(intel_dp); |
1011 | ironlake_edp_panel_vdd_on(intel_dp); | 1209 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
1012 | |||
1013 | intel_dp_start_link_train(intel_dp); | 1210 | intel_dp_start_link_train(intel_dp); |
1014 | 1211 | ironlake_edp_panel_on(intel_dp); | |
1015 | if (is_edp(intel_dp)) { | 1212 | ironlake_edp_panel_vdd_off(intel_dp, true); |
1016 | ironlake_edp_panel_on(intel_dp); | ||
1017 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1018 | } | ||
1019 | 1213 | ||
1020 | intel_dp_complete_link_train(intel_dp); | 1214 | intel_dp_complete_link_train(intel_dp); |
1021 | 1215 | ironlake_edp_backlight_on(intel_dp); | |
1022 | if (is_edp(intel_dp)) | ||
1023 | ironlake_edp_backlight_on(dev); | ||
1024 | 1216 | ||
1025 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | 1217 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; |
1218 | |||
1219 | if (HAS_PCH_CPT(dev)) | ||
1220 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); | ||
1026 | } | 1221 | } |
1027 | 1222 | ||
1028 | static void | 1223 | static void |
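intel_dp_prepare() and intel_dp_commit() now spell out the whole eDP tear-down and bring-up order explicitly instead of scattering is_edp() special cases. The ordering, sketched as stubs (labels paraphrase the driver calls, they are not the driver's API):

#include <stdio.h>

static void step(const char *s) { printf("-> %s\n", s); }

int main(void)
{
        puts("prepare (tear-down before modeset):");
        step("vdd_on + sink_dpms(ON) + vdd_off"); /* wake the sink */
        step("backlight_off");          /* hide flicker first */
        step("link_down");
        step("panel_off");

        puts("commit (bring-up):");
        step("panel_vdd_on");           /* AUX usable from here on */
        step("sink_dpms(ON)");
        step("start_link_train");
        step("panel_on");               /* real power takes over */
        step("panel_vdd_off(sync)");
        step("complete_link_train");
        step("backlight_on");           /* last, once the image is stable */
        return 0;
}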
@@ -1034,28 +1229,27 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
1034 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 1229 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
1035 | 1230 | ||
1036 | if (mode != DRM_MODE_DPMS_ON) { | 1231 | if (mode != DRM_MODE_DPMS_ON) { |
1232 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1037 | if (is_edp(intel_dp)) | 1233 | if (is_edp(intel_dp)) |
1038 | ironlake_edp_backlight_off(dev); | 1234 | ironlake_edp_backlight_off(intel_dp); |
1039 | intel_dp_sink_dpms(intel_dp, mode); | 1235 | intel_dp_sink_dpms(intel_dp, mode); |
1040 | intel_dp_link_down(intel_dp); | 1236 | intel_dp_link_down(intel_dp); |
1041 | if (is_edp(intel_dp)) | 1237 | ironlake_edp_panel_off(encoder); |
1042 | ironlake_edp_panel_off(dev); | ||
1043 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) | 1238 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) |
1044 | ironlake_edp_pll_off(encoder); | 1239 | ironlake_edp_pll_off(encoder); |
1240 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1045 | } else { | 1241 | } else { |
1046 | if (is_edp(intel_dp)) | 1242 | ironlake_edp_panel_vdd_on(intel_dp); |
1047 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1048 | intel_dp_sink_dpms(intel_dp, mode); | 1243 | intel_dp_sink_dpms(intel_dp, mode); |
1049 | if (!(dp_reg & DP_PORT_EN)) { | 1244 | if (!(dp_reg & DP_PORT_EN)) { |
1050 | intel_dp_start_link_train(intel_dp); | 1245 | intel_dp_start_link_train(intel_dp); |
1051 | if (is_edp(intel_dp)) { | 1246 | ironlake_edp_panel_on(intel_dp); |
1052 | ironlake_edp_panel_on(intel_dp); | 1247 | ironlake_edp_panel_vdd_off(intel_dp, true); |
1053 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1054 | } | ||
1055 | intel_dp_complete_link_train(intel_dp); | 1248 | intel_dp_complete_link_train(intel_dp); |
1056 | } | 1249 | ironlake_edp_backlight_on(intel_dp); |
1057 | if (is_edp(intel_dp)) | 1250 | } else |
1058 | ironlake_edp_backlight_on(dev); | 1251 | ironlake_edp_panel_vdd_off(intel_dp, false); |
1252 | ironlake_edp_backlight_on(intel_dp); | ||
1059 | } | 1253 | } |
1060 | intel_dp->dpms_mode = mode; | 1254 | intel_dp->dpms_mode = mode; |
1061 | } | 1255 | } |
@@ -1364,7 +1558,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1364 | DP_LINK_CONFIGURATION_SIZE); | 1558 | DP_LINK_CONFIGURATION_SIZE); |
1365 | 1559 | ||
1366 | DP |= DP_PORT_EN; | 1560 | DP |= DP_PORT_EN; |
1367 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1561 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1368 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1562 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1369 | else | 1563 | else |
1370 | DP &= ~DP_LINK_TRAIN_MASK; | 1564 | DP &= ~DP_LINK_TRAIN_MASK; |
@@ -1383,7 +1577,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1383 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1577 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1384 | } | 1578 | } |
1385 | 1579 | ||
1386 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1580 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1387 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | 1581 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; |
1388 | else | 1582 | else |
1389 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1583 | reg = DP | DP_LINK_TRAIN_PAT_1; |
@@ -1458,7 +1652,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1458 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1652 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1459 | } | 1653 | } |
1460 | 1654 | ||
1461 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1655 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1462 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | 1656 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; |
1463 | else | 1657 | else |
1464 | reg = DP | DP_LINK_TRAIN_PAT_2; | 1658 | reg = DP | DP_LINK_TRAIN_PAT_2; |
@@ -1499,7 +1693,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1499 | ++tries; | 1693 | ++tries; |
1500 | } | 1694 | } |
1501 | 1695 | ||
1502 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1696 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1503 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1697 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1504 | else | 1698 | else |
1505 | reg = DP | DP_LINK_TRAIN_OFF; | 1699 | reg = DP | DP_LINK_TRAIN_OFF; |
@@ -1529,7 +1723,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1529 | udelay(100); | 1723 | udelay(100); |
1530 | } | 1724 | } |
1531 | 1725 | ||
1532 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { | 1726 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) { |
1533 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1727 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1534 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); | 1728 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1535 | } else { | 1729 | } else { |
@@ -1578,13 +1772,14 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1578 | 1772 | ||
1579 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1773 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1580 | POSTING_READ(intel_dp->output_reg); | 1774 | POSTING_READ(intel_dp->output_reg); |
1775 | msleep(intel_dp->panel_power_down_delay); | ||
1581 | } | 1776 | } |
1582 | 1777 | ||
1583 | static bool | 1778 | static bool |
1584 | intel_dp_get_dpcd(struct intel_dp *intel_dp) | 1779 | intel_dp_get_dpcd(struct intel_dp *intel_dp) |
1585 | { | 1780 | { |
1586 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, | 1781 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, |
1587 | sizeof (intel_dp->dpcd)) && | 1782 | sizeof(intel_dp->dpcd)) && |
1588 | (intel_dp->dpcd[DP_DPCD_REV] != 0)) { | 1783 | (intel_dp->dpcd[DP_DPCD_REV] != 0)) { |
1589 | return true; | 1784 | return true; |
1590 | } | 1785 | } |
@@ -1592,6 +1787,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
1592 | return false; | 1787 | return false; |
1593 | } | 1788 | } |
1594 | 1789 | ||
1790 | static bool | ||
1791 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) | ||
1792 | { | ||
1793 | int ret; | ||
1794 | |||
1795 | ret = intel_dp_aux_native_read_retry(intel_dp, | ||
1796 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
1797 | sink_irq_vector, 1); | ||
1798 | if (!ret) | ||
1799 | return false; | ||
1800 | |||
1801 | return true; | ||
1802 | } | ||
1803 | |||
1804 | static void | ||
1805 | intel_dp_handle_test_request(struct intel_dp *intel_dp) | ||
1806 | { | ||
1807 | /* NAK by default */ | ||
1808 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); | ||
1809 | } | ||
1810 | |||
1595 | /* | 1811 | /* |
1596 | * According to DP spec | 1812 | * According to DP spec |
1597 | * 5.1.2: | 1813 | * 5.1.2: |
@@ -1604,6 +1820,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
1604 | static void | 1820 | static void |
1605 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 1821 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1606 | { | 1822 | { |
1823 | u8 sink_irq_vector; | ||
1824 | |||
1607 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) | 1825 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) |
1608 | return; | 1826 | return; |
1609 | 1827 | ||
@@ -1622,6 +1840,20 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
1622 | return; | 1840 | return; |
1623 | } | 1841 | } |
1624 | 1842 | ||
1843 | /* Try to read the source of the interrupt */ | ||
1844 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
1845 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { | ||
1846 | /* Clear interrupt source */ | ||
1847 | intel_dp_aux_native_write_1(intel_dp, | ||
1848 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
1849 | sink_irq_vector); | ||
1850 | |||
1851 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) | ||
1852 | intel_dp_handle_test_request(intel_dp); | ||
1853 | if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) | ||
1854 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); | ||
1855 | } | ||
1856 | |||
1625 | if (!intel_channel_eq_ok(intel_dp)) { | 1857 | if (!intel_channel_eq_ok(intel_dp)) { |
1626 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | 1858 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", |
1627 | drm_get_encoder_name(&intel_dp->base.base)); | 1859 | drm_get_encoder_name(&intel_dp->base.base)); |
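On DPCD 1.1+ sinks the link-status check now reads the device service IRQ vector, writes it back to acknowledge it, routes automated test requests to a minimal handler, and logs CP or sink-specific interrupts before the usual channel-EQ retrain. A compact sketch of the dispatch; the bit positions here are illustrative, not the DPCD encoding:

#include <stdint.h>
#include <stdio.h>

#define IRQ_AUTOMATED_TEST (1u << 1)   /* illustrative bit layout */
#define IRQ_CP             (1u << 2)
#define IRQ_SINK_SPECIFIC  (1u << 6)

static void handle_sink_irq(uint8_t vector)
{
        /* The driver first writes the vector back to clear it. */
        if (vector & IRQ_AUTOMATED_TEST)
                puts("automated test request -> default test response");
        if (vector & (IRQ_CP | IRQ_SINK_SPECIFIC))
                puts("CP or sink specific irq, unhandled");
}

int main(void)
{
        handle_sink_irq(IRQ_AUTOMATED_TEST | IRQ_SINK_SPECIFIC);
        return 0;
}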
@@ -1683,6 +1915,31 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1683 | return intel_dp_detect_dpcd(intel_dp); | 1915 | return intel_dp_detect_dpcd(intel_dp); |
1684 | } | 1916 | } |
1685 | 1917 | ||
1918 | static struct edid * | ||
1919 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | ||
1920 | { | ||
1921 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1922 | struct edid *edid; | ||
1923 | |||
1924 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1925 | edid = drm_get_edid(connector, adapter); | ||
1926 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1927 | return edid; | ||
1928 | } | ||
1929 | |||
1930 | static int | ||
1931 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) | ||
1932 | { | ||
1933 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1934 | int ret; | ||
1935 | |||
1936 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1937 | ret = intel_ddc_get_modes(connector, adapter); | ||
1938 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1939 | return ret; | ||
1940 | } | ||
1941 | |||
1942 | |||
1686 | /** | 1943 | /** |
1687 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | 1944 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. |
1688 | * | 1945 | * |
@@ -1715,7 +1972,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
1715 | if (intel_dp->force_audio) { | 1972 | if (intel_dp->force_audio) { |
1716 | intel_dp->has_audio = intel_dp->force_audio > 0; | 1973 | intel_dp->has_audio = intel_dp->force_audio > 0; |
1717 | } else { | 1974 | } else { |
1718 | edid = drm_get_edid(connector, &intel_dp->adapter); | 1975 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
1719 | if (edid) { | 1976 | if (edid) { |
1720 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | 1977 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
1721 | connector->display_info.raw_edid = NULL; | 1978 | connector->display_info.raw_edid = NULL; |
@@ -1736,28 +1993,36 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1736 | /* We should parse the EDID data and find out if it has an audio sink | 1993 | /* We should parse the EDID data and find out if it has an audio sink |
1737 | */ | 1994 | */ |
1738 | 1995 | ||
1739 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); | 1996 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
1740 | if (ret) { | 1997 | if (ret) { |
1741 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { | 1998 | if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { |
1742 | struct drm_display_mode *newmode; | 1999 | struct drm_display_mode *newmode; |
1743 | list_for_each_entry(newmode, &connector->probed_modes, | 2000 | list_for_each_entry(newmode, &connector->probed_modes, |
1744 | head) { | 2001 | head) { |
1745 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | 2002 | if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { |
1746 | dev_priv->panel_fixed_mode = | 2003 | intel_dp->panel_fixed_mode = |
1747 | drm_mode_duplicate(dev, newmode); | 2004 | drm_mode_duplicate(dev, newmode); |
1748 | break; | 2005 | break; |
1749 | } | 2006 | } |
1750 | } | 2007 | } |
1751 | } | 2008 | } |
1752 | |||
1753 | return ret; | 2009 | return ret; |
1754 | } | 2010 | } |
1755 | 2011 | ||
1756 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 2012 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1757 | if (is_edp(intel_dp)) { | 2013 | if (is_edp(intel_dp)) { |
1758 | if (dev_priv->panel_fixed_mode != NULL) { | 2014 | /* initialize panel mode from VBT if available for eDP */ |
2015 | if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { | ||
2016 | intel_dp->panel_fixed_mode = | ||
2017 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
2018 | if (intel_dp->panel_fixed_mode) { | ||
2019 | intel_dp->panel_fixed_mode->type |= | ||
2020 | DRM_MODE_TYPE_PREFERRED; | ||
2021 | } | ||
2022 | } | ||
2023 | if (intel_dp->panel_fixed_mode) { | ||
1759 | struct drm_display_mode *mode; | 2024 | struct drm_display_mode *mode; |
1760 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 2025 | mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); |
1761 | drm_mode_probed_add(connector, mode); | 2026 | drm_mode_probed_add(connector, mode); |
1762 | return 1; | 2027 | return 1; |
1763 | } | 2028 | } |
@@ -1772,7 +2037,7 @@ intel_dp_detect_audio(struct drm_connector *connector) | |||
1772 | struct edid *edid; | 2037 | struct edid *edid; |
1773 | bool has_audio = false; | 2038 | bool has_audio = false; |
1774 | 2039 | ||
1775 | edid = drm_get_edid(connector, &intel_dp->adapter); | 2040 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
1776 | if (edid) { | 2041 | if (edid) { |
1777 | has_audio = drm_detect_monitor_audio(edid); | 2042 | has_audio = drm_detect_monitor_audio(edid); |
1778 | 2043 | ||
@@ -1839,7 +2104,7 @@ done: | |||
1839 | } | 2104 | } |
1840 | 2105 | ||
1841 | static void | 2106 | static void |
1842 | intel_dp_destroy (struct drm_connector *connector) | 2107 | intel_dp_destroy(struct drm_connector *connector) |
1843 | { | 2108 | { |
1844 | struct drm_device *dev = connector->dev; | 2109 | struct drm_device *dev = connector->dev; |
1845 | 2110 | ||
@@ -1857,6 +2122,10 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
1857 | 2122 | ||
1858 | i2c_del_adapter(&intel_dp->adapter); | 2123 | i2c_del_adapter(&intel_dp->adapter); |
1859 | drm_encoder_cleanup(encoder); | 2124 | drm_encoder_cleanup(encoder); |
2125 | if (is_edp(intel_dp)) { | ||
2126 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | ||
2127 | ironlake_panel_vdd_off_sync(intel_dp); | ||
2128 | } | ||
1860 | kfree(intel_dp); | 2129 | kfree(intel_dp); |
1861 | } | 2130 | } |
1862 | 2131 | ||
@@ -1896,7 +2165,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) | |||
1896 | 2165 | ||
1897 | /* Return which DP Port should be selected for Transcoder DP control */ | 2166 | /* Return which DP Port should be selected for Transcoder DP control */ |
1898 | int | 2167 | int |
1899 | intel_trans_dp_port_sel (struct drm_crtc *crtc) | 2168 | intel_trans_dp_port_sel(struct drm_crtc *crtc) |
1900 | { | 2169 | { |
1901 | struct drm_device *dev = crtc->dev; | 2170 | struct drm_device *dev = crtc->dev; |
1902 | struct drm_mode_config *mode_config = &dev->mode_config; | 2171 | struct drm_mode_config *mode_config = &dev->mode_config; |
@@ -1993,10 +2262,13 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1993 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 2262 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1994 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 2263 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1995 | 2264 | ||
1996 | if (is_edp(intel_dp)) | 2265 | if (is_edp(intel_dp)) { |
1997 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 2266 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
2267 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, | ||
2268 | ironlake_panel_vdd_work); | ||
2269 | } | ||
1998 | 2270 | ||
1999 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 2271 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2000 | connector->interlace_allowed = true; | 2272 | connector->interlace_allowed = true; |
2001 | connector->doublescan_allowed = 0; | 2273 | connector->doublescan_allowed = 0; |
2002 | 2274 | ||
@@ -2032,25 +2304,60 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2032 | break; | 2304 | break; |
2033 | } | 2305 | } |
2034 | 2306 | ||
2035 | intel_dp_i2c_init(intel_dp, intel_connector, name); | ||
2036 | |||
2037 | /* Cache some DPCD data in the eDP case */ | 2307 | /* Cache some DPCD data in the eDP case */ |
2038 | if (is_edp(intel_dp)) { | 2308 | if (is_edp(intel_dp)) { |
2039 | bool ret; | 2309 | bool ret; |
2040 | u32 pp_on, pp_div; | 2310 | struct edp_power_seq cur, vbt; |
2311 | u32 pp_on, pp_off, pp_div; | ||
2041 | 2312 | ||
2042 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | 2313 | pp_on = I915_READ(PCH_PP_ON_DELAYS); |
2314 | pp_off = I915_READ(PCH_PP_OFF_DELAYS); | ||
2043 | pp_div = I915_READ(PCH_PP_DIVISOR); | 2315 | pp_div = I915_READ(PCH_PP_DIVISOR); |
2044 | 2316 | ||
2045 | /* Get T3 & T12 values (note: VESA not bspec terminology) */ | 2317 | /* Pull timing values out of registers */ |
2046 | dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16; | 2318 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> |
2047 | dev_priv->panel_t3 /= 10; /* t3 in 100us units */ | 2319 | PANEL_POWER_UP_DELAY_SHIFT; |
2048 | dev_priv->panel_t12 = pp_div & 0xf; | 2320 | |
2049 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ | 2321 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> |
2322 | PANEL_LIGHT_ON_DELAY_SHIFT; | ||
2323 | |||
2324 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> | ||
2325 | PANEL_LIGHT_OFF_DELAY_SHIFT; | ||
2326 | |||
2327 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | ||
2328 | PANEL_POWER_DOWN_DELAY_SHIFT; | ||
2329 | |||
2330 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | ||
2331 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; | ||
2332 | |||
2333 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2334 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); | ||
2335 | |||
2336 | vbt = dev_priv->edp.pps; | ||
2337 | |||
2338 | DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2339 | vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); | ||
2340 | |||
2341 | #define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) | ||
2342 | |||
2343 | intel_dp->panel_power_up_delay = get_delay(t1_t3); | ||
2344 | intel_dp->backlight_on_delay = get_delay(t8); | ||
2345 | intel_dp->backlight_off_delay = get_delay(t9); | ||
2346 | intel_dp->panel_power_down_delay = get_delay(t10); | ||
2347 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); | ||
2348 | |||
2349 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | ||
2350 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | ||
2351 | intel_dp->panel_power_cycle_delay); | ||
2352 | |||
2353 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | ||
2354 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | ||
2355 | |||
2356 | intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay; | ||
2050 | 2357 | ||
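Each delay above is resolved as the maximum of what the power sequencer registers already hold and what the VBT asks for, then converted from the hardware's 100 us units to milliseconds with a ceiling divide; that is all the odd-looking (x + 9) / 10 in get_delay() does. Worked standalone, with hypothetical sample values:

#include <stdio.h>

/* max(register value, VBT value), 100us units -> ms, rounded up */
static int get_delay(int cur, int vbt)
{
        int v = cur > vbt ? cur : vbt;

        return (v + 9) / 10;
}

int main(void)
{
        /* register says 2100 (210 ms), VBT asks for 2500 (250 ms) */
        printf("power up delay: %d ms\n", get_delay(2100, 2500)); /* 250 */
        /* 35 * 100us must round up to 4 ms, not truncate to 3 */
        printf("short delay:    %d ms\n", get_delay(35, 0));      /* 4 */
        return 0;
}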
2051 | ironlake_edp_panel_vdd_on(intel_dp); | 2358 | ironlake_edp_panel_vdd_on(intel_dp); |
2052 | ret = intel_dp_get_dpcd(intel_dp); | 2359 | ret = intel_dp_get_dpcd(intel_dp); |
2053 | ironlake_edp_panel_vdd_off(intel_dp); | 2360 | ironlake_edp_panel_vdd_off(intel_dp, false); |
2054 | if (ret) { | 2361 | if (ret) { |
2055 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 2362 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
2056 | dev_priv->no_aux_handshake = | 2363 | dev_priv->no_aux_handshake = |
@@ -2065,18 +2372,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2065 | } | 2372 | } |
2066 | } | 2373 | } |
2067 | 2374 | ||
2375 | intel_dp_i2c_init(intel_dp, intel_connector, name); | ||
2376 | |||
2068 | intel_encoder->hot_plug = intel_dp_hot_plug; | 2377 | intel_encoder->hot_plug = intel_dp_hot_plug; |
2069 | 2378 | ||
2070 | if (is_edp(intel_dp)) { | 2379 | if (is_edp(intel_dp)) { |
2071 | /* initialize panel mode from VBT if available for eDP */ | ||
2072 | if (dev_priv->lfp_lvds_vbt_mode) { | ||
2073 | dev_priv->panel_fixed_mode = | ||
2074 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
2075 | if (dev_priv->panel_fixed_mode) { | ||
2076 | dev_priv->panel_fixed_mode->type |= | ||
2077 | DRM_MODE_TYPE_PREFERRED; | ||
2078 | } | ||
2079 | } | ||
2080 | dev_priv->int_edp_connector = connector; | 2380 | dev_priv->int_edp_connector = connector; |
2081 | intel_panel_setup_backlight(dev); | 2381 | intel_panel_setup_backlight(dev); |
2082 | } | 2382 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fe1099d8817e..bd9a604b73da 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -34,7 +34,7 @@ | |||
34 | #define _wait_for(COND, MS, W) ({ \ | 34 | #define _wait_for(COND, MS, W) ({ \ |
35 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ | 35 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ |
36 | int ret__ = 0; \ | 36 | int ret__ = 0; \ |
37 | while (! (COND)) { \ | 37 | while (!(COND)) { \ |
38 | if (time_after(jiffies, timeout__)) { \ | 38 | if (time_after(jiffies, timeout__)) { \ |
39 | ret__ = -ETIMEDOUT; \ | 39 | ret__ = -ETIMEDOUT; \ |
40 | break; \ | 40 | break; \ |
@@ -49,10 +49,10 @@ | |||
49 | 49 | ||
50 | #define MSLEEP(x) do { \ | 50 | #define MSLEEP(x) do { \ |
51 | if (in_dbg_master()) \ | 51 | if (in_dbg_master()) \ |
52 | mdelay(x); \ | 52 | mdelay(x); \ |
53 | else \ | 53 | else \ |
54 | msleep(x); \ | 54 | msleep(x); \ |
55 | } while(0) | 55 | } while (0) |
56 | 56 | ||
57 | #define KHz(x) (1000*x) | 57 | #define KHz(x) (1000*x) |
58 | #define MHz(x) KHz(1000*x) | 58 | #define MHz(x) KHz(1000*x) |
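_wait_for() above is a poll-until-timeout expression: it evaluates to 0 as soon as COND becomes true and to -ETIMEDOUT once MS milliseconds pass, optionally sleeping W ms per iteration. A userspace approximation with the same shape (GNU C statement expression, as in the kernel; clock() stands in for jiffies and W is folded away):

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Busy-polling userspace cousin of the kernel's _wait_for(COND, MS, W). */
#define wait_for_cond(COND, MS) ({                                      \
        clock_t timeout__ = clock() + (MS) * CLOCKS_PER_SEC / 1000;     \
        int ret__ = 0;                                                  \
        while (!(COND)) {                                               \
                if (clock() > timeout__) {                              \
                        ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
        }                                                               \
        ret__;                                                          \
})

int main(void)
{
        int flag = 1;

        printf("true condition:  %d\n", wait_for_cond(flag == 1, 10));
        printf("never condition: %d\n", wait_for_cond(flag == 2, 10));
        return 0;
}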
@@ -171,6 +171,9 @@ struct intel_crtc { | |||
171 | int16_t cursor_width, cursor_height; | 171 | int16_t cursor_width, cursor_height; |
172 | bool cursor_visible; | 172 | bool cursor_visible; |
173 | unsigned int bpp; | 173 | unsigned int bpp; |
174 | |||
175 | bool no_pll; /* tertiary pipe for IVB */ | ||
176 | bool use_pll_a; | ||
174 | }; | 177 | }; |
175 | 178 | ||
176 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 179 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -184,7 +187,7 @@ struct intel_crtc { | |||
184 | #define DIP_VERSION_AVI 0x2 | 187 | #define DIP_VERSION_AVI 0x2 |
185 | #define DIP_LEN_AVI 13 | 188 | #define DIP_LEN_AVI 13 |
186 | 189 | ||
187 | #define DIP_TYPE_SPD 0x3 | 190 | #define DIP_TYPE_SPD 0x83 |
188 | #define DIP_VERSION_SPD 0x1 | 191 | #define DIP_VERSION_SPD 0x1 |
189 | #define DIP_LEN_SPD 25 | 192 | #define DIP_LEN_SPD 25 |
190 | #define DIP_SPD_UNKNOWN 0 | 193 | #define DIP_SPD_UNKNOWN 0 |
@@ -284,7 +287,7 @@ void | |||
284 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 287 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
285 | struct drm_display_mode *adjusted_mode); | 288 | struct drm_display_mode *adjusted_mode); |
286 | extern bool intel_dpd_is_edp(struct drm_device *dev); | 289 | extern bool intel_dpd_is_edp(struct drm_device *dev); |
287 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 290 | extern void intel_edp_link_config(struct intel_encoder *, int *, int *); |
288 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); | 291 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); |
289 | 292 | ||
290 | /* intel_panel.c */ | 293 | /* intel_panel.c */ |
@@ -304,8 +307,8 @@ extern void intel_panel_destroy_backlight(struct drm_device *dev); | |||
304 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 307 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
305 | 308 | ||
306 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 309 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
307 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 310 | extern void intel_encoder_prepare(struct drm_encoder *encoder); |
308 | extern void intel_encoder_commit (struct drm_encoder *encoder); | 311 | extern void intel_encoder_commit(struct drm_encoder *encoder); |
309 | extern void intel_encoder_destroy(struct drm_encoder *encoder); | 312 | extern void intel_encoder_destroy(struct drm_encoder *encoder); |
310 | 313 | ||
311 | static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) | 314 | static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) |
@@ -377,4 +380,8 @@ extern void intel_fb_output_poll_changed(struct drm_device *dev); | |||
377 | extern void intel_fb_restore_mode(struct drm_device *dev); | 380 | extern void intel_fb_restore_mode(struct drm_device *dev); |
378 | 381 | ||
379 | extern void intel_init_clock_gating(struct drm_device *dev); | 382 | extern void intel_init_clock_gating(struct drm_device *dev); |
383 | extern void intel_write_eld(struct drm_encoder *encoder, | ||
384 | struct drm_display_mode *mode); | ||
385 | extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); | ||
386 | |||
380 | #endif /* __INTEL_DRV_H__ */ | 387 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 226ba830f383..d4f5a0b2120d 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -69,8 +69,7 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame) | |||
69 | frame->checksum = 0; | 69 | frame->checksum = 0; |
70 | frame->ecc = 0; | 70 | frame->ecc = 0; |
71 | 71 | ||
72 | /* Header isn't part of the checksum */ | 72 | for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) |
73 | for (i = 5; i < frame->len; i++) | ||
74 | sum += data[i]; | 73 | sum += data[i]; |
75 | 74 | ||
76 | frame->checksum = 0x100 - sum; | 75 | frame->checksum = 0x100 - sum; |
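The deleted loop skipped the three header bytes and stopped short of the payload; HDMI InfoFrame packets require the checksum byte to bring the sum of header (type, version, length), payload, and checksum to zero modulo 256, which the corrected frame->len + DIP_HEADER_SIZE bound provides. The DIP_TYPE_SPD change from 0x3 to 0x83 in intel_drv.h follows the same packet rules: InfoFrame type codes are transmitted with bit 7 set. A standalone restatement of the checksum rule:

    #include <stdint.h>
    #include <stddef.h>

    /* Sum every byte of the frame (header + payload) with the checksum
     * field zeroed, then choose the value that wraps the total to 0.
     */
    static uint8_t infoframe_checksum(const uint8_t *frame, size_t len)
    {
            uint8_t sum = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    sum += frame[i];
            return (uint8_t)(0x100 - sum);  /* sum + checksum == 0 mod 256 */
    }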
@@ -104,7 +103,7 @@ static u32 intel_infoframe_flags(struct dip_infoframe *frame) | |||
104 | flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; | 103 | flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; |
105 | break; | 104 | break; |
106 | case DIP_TYPE_SPD: | 105 | case DIP_TYPE_SPD: |
107 | flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC; | 106 | flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC; |
108 | break; | 107 | break; |
109 | default: | 108 | default: |
110 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); | 109 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); |
@@ -165,9 +164,9 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder, | |||
165 | 164 | ||
166 | flags = intel_infoframe_index(frame); | 165 | flags = intel_infoframe_index(frame); |
167 | 166 | ||
168 | val &= ~VIDEO_DIP_SELECT_MASK; | 167 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ |
169 | 168 | ||
170 | I915_WRITE(reg, val | flags); | 169 | I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); |
171 | 170 | ||
172 | for (i = 0; i < len; i += 4) { | 171 | for (i = 0; i < len; i += 4) { |
173 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | 172 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); |
@@ -245,16 +244,17 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
245 | sdvox |= HDMI_MODE_SELECT; | 244 | sdvox |= HDMI_MODE_SELECT; |
246 | 245 | ||
247 | if (intel_hdmi->has_audio) { | 246 | if (intel_hdmi->has_audio) { |
247 | DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", | ||
248 | pipe_name(intel_crtc->pipe)); | ||
248 | sdvox |= SDVO_AUDIO_ENABLE; | 249 | sdvox |= SDVO_AUDIO_ENABLE; |
249 | sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; | 250 | sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; |
251 | intel_write_eld(encoder, adjusted_mode); | ||
250 | } | 252 | } |
251 | 253 | ||
252 | if (intel_crtc->pipe == 1) { | 254 | if (HAS_PCH_CPT(dev)) |
253 | if (HAS_PCH_CPT(dev)) | 255 | sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); |
254 | sdvox |= PORT_TRANS_B_SEL_CPT; | 256 | else if (intel_crtc->pipe == 1) |
255 | else | 257 | sdvox |= SDVO_PIPE_B_SELECT; |
256 | sdvox |= SDVO_PIPE_B_SELECT; | ||
257 | } | ||
258 | 258 | ||
259 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); | 259 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); |
260 | POSTING_READ(intel_hdmi->sdvox_reg); | 260 | POSTING_READ(intel_hdmi->sdvox_reg); |
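The old code could only steer the port to pipe B (a single select bit), which is why it special-cased pipe == 1; on CPT the port register carries a transcoder-select field wide enough for all three pipes, hence the unconditional PORT_TRANS_SEL_CPT(intel_crtc->pipe). A sketch of the assumed encoding; the shift is inferred from the surrounding driver code, not confirmed by this diff:

    /* Assumed CPT encoding: a two-bit transcoder-select field, so
     * pipe C (value 2) is expressible where the legacy single
     * SDVO_PIPE_B_SELECT bit was not.
     */
    #define PORT_TRANS_SEL_CPT(pipe)  ((pipe) << 29)   /* assumption */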
@@ -486,6 +486,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
486 | struct intel_encoder *intel_encoder; | 486 | struct intel_encoder *intel_encoder; |
487 | struct intel_connector *intel_connector; | 487 | struct intel_connector *intel_connector; |
488 | struct intel_hdmi *intel_hdmi; | 488 | struct intel_hdmi *intel_hdmi; |
489 | int i; | ||
489 | 490 | ||
490 | intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); | 491 | intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); |
491 | if (!intel_hdmi) | 492 | if (!intel_hdmi) |
@@ -511,7 +512,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
511 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 512 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
512 | connector->interlace_allowed = 0; | 513 | connector->interlace_allowed = 0; |
513 | connector->doublescan_allowed = 0; | 514 | connector->doublescan_allowed = 0; |
514 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 515 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
515 | 516 | ||
516 | /* Set up the DDC bus. */ | 517 | /* Set up the DDC bus. */ |
517 | if (sdvox_reg == SDVOB) { | 518 | if (sdvox_reg == SDVOB) { |
@@ -538,10 +539,14 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
538 | 539 | ||
539 | intel_hdmi->sdvox_reg = sdvox_reg; | 540 | intel_hdmi->sdvox_reg = sdvox_reg; |
540 | 541 | ||
541 | if (!HAS_PCH_SPLIT(dev)) | 542 | if (!HAS_PCH_SPLIT(dev)) { |
542 | intel_hdmi->write_infoframe = i9xx_write_infoframe; | 543 | intel_hdmi->write_infoframe = i9xx_write_infoframe; |
543 | else | 544 | I915_WRITE(VIDEO_DIP_CTL, 0); |
545 | } else { | ||
544 | intel_hdmi->write_infoframe = ironlake_write_infoframe; | 546 | intel_hdmi->write_infoframe = ironlake_write_infoframe; |
547 | for_each_pipe(i) | ||
548 | I915_WRITE(TVIDEO_DIP_CTL(i), 0); | ||
549 | } | ||
545 | 550 | ||
546 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); | 551 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
547 | 552 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index d98cee60b602..9ed5380e5a53 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -422,13 +422,7 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) | |||
422 | { | 422 | { |
423 | struct intel_gmbus *bus = to_intel_gmbus(adapter); | 423 | struct intel_gmbus *bus = to_intel_gmbus(adapter); |
424 | 424 | ||
425 | /* speed: | 425 | bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed; |
426 | * 0x0 = 100 KHz | ||
427 | * 0x1 = 50 KHz | ||
428 | * 0x2 = 400 KHz | ||
429 | * 0x3 = 1000 Khz | ||
430 | */ | ||
431 | bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); | ||
432 | } | 426 | } |
433 | 427 | ||
434 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | 428 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) |
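With the << 8 dropped, callers of intel_gmbus_set_speed now pass a value already aligned to the GMBUS0 rate field (bits 9:8, per the ~(0x3 << 8) mask) instead of a raw 0-3 index, which is why the encoding comment could go. A sketch of what the pre-shifted constants would look like; the GMBUS_RATE_* names follow the driver's style but are an assumption here:

    /* Assumed pre-shifted rate-select values for bits 9:8 of GMBUS0. */
    #define GMBUS_RATE_100KHZ  (0 << 8)
    #define GMBUS_RATE_50KHZ   (1 << 8)
    #define GMBUS_RATE_400KHZ  (2 << 8)
    #define GMBUS_RATE_1MHZ    (3 << 8)

    intel_gmbus_set_speed(adapter, GMBUS_RATE_400KHZ);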
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 31da77f5c051..42f165a520de 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -888,9 +888,11 @@ bool intel_lvds_init(struct drm_device *dev) | |||
888 | intel_encoder->type = INTEL_OUTPUT_LVDS; | 888 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
889 | 889 | ||
890 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 890 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
891 | intel_encoder->crtc_mask = (1 << 1); | 891 | if (HAS_PCH_SPLIT(dev)) |
892 | if (INTEL_INFO(dev)->gen >= 5) | 892 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
893 | intel_encoder->crtc_mask |= (1 << 0); | 893 | else |
894 | intel_encoder->crtc_mask = (1 << 1); | ||
895 | |||
894 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 896 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
895 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 897 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
896 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 898 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
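crtc_mask is a per-encoder bitmap of usable pipes: bit N set means pipe N may drive the output, so PCH-split parts open LVDS to pipes A, B and C while older hardware stays wired to pipe B. A one-line restatement of the bit test:

    #include <stdbool.h>

    /* Sketch: an encoder may be assigned to a pipe iff its bit is set. */
    static bool encoder_supports_pipe(unsigned int crtc_mask, int pipe)
    {
            return (crtc_mask & (1u << pipe)) != 0;
    }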
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 3b26a3ba02dd..be2c6fe07d12 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
28 | #include <linux/fb.h> | 28 | #include <linux/fb.h> |
29 | #include <drm/drm_edid.h> | ||
29 | #include "drmP.h" | 30 | #include "drmP.h" |
30 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
31 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
@@ -74,6 +75,7 @@ int intel_ddc_get_modes(struct drm_connector *connector, | |||
74 | if (edid) { | 75 | if (edid) { |
75 | drm_mode_connector_update_edid_property(connector, edid); | 76 | drm_mode_connector_update_edid_property(connector, edid); |
76 | ret = drm_add_edid_modes(connector, edid); | 77 | ret = drm_add_edid_modes(connector, edid); |
78 | drm_edid_to_eld(connector, edid); | ||
77 | connector->display_info.raw_edid = NULL; | 79 | connector->display_info.raw_edid = NULL; |
78 | kfree(edid); | 80 | kfree(edid); |
79 | } | 81 | } |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8e8158bb16e..289140bc83cb 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -51,61 +51,61 @@ | |||
51 | #define MBOX_ASLE (1<<2) | 51 | #define MBOX_ASLE (1<<2) |
52 | 52 | ||
53 | struct opregion_header { | 53 | struct opregion_header { |
54 | u8 signature[16]; | 54 | u8 signature[16]; |
55 | u32 size; | 55 | u32 size; |
56 | u32 opregion_ver; | 56 | u32 opregion_ver; |
57 | u8 bios_ver[32]; | 57 | u8 bios_ver[32]; |
58 | u8 vbios_ver[16]; | 58 | u8 vbios_ver[16]; |
59 | u8 driver_ver[16]; | 59 | u8 driver_ver[16]; |
60 | u32 mboxes; | 60 | u32 mboxes; |
61 | u8 reserved[164]; | 61 | u8 reserved[164]; |
62 | } __attribute__((packed)); | 62 | } __attribute__((packed)); |
63 | 63 | ||
64 | /* OpRegion mailbox #1: public ACPI methods */ | 64 | /* OpRegion mailbox #1: public ACPI methods */ |
65 | struct opregion_acpi { | 65 | struct opregion_acpi { |
66 | u32 drdy; /* driver readiness */ | 66 | u32 drdy; /* driver readiness */ |
67 | u32 csts; /* notification status */ | 67 | u32 csts; /* notification status */ |
68 | u32 cevt; /* current event */ | 68 | u32 cevt; /* current event */ |
69 | u8 rsvd1[20]; | 69 | u8 rsvd1[20]; |
70 | u32 didl[8]; /* supported display devices ID list */ | 70 | u32 didl[8]; /* supported display devices ID list */ |
71 | u32 cpdl[8]; /* currently presented display list */ | 71 | u32 cpdl[8]; /* currently presented display list */ |
72 | u32 cadl[8]; /* currently active display list */ | 72 | u32 cadl[8]; /* currently active display list */ |
73 | u32 nadl[8]; /* next active devices list */ | 73 | u32 nadl[8]; /* next active devices list */ |
74 | u32 aslp; /* ASL sleep time-out */ | 74 | u32 aslp; /* ASL sleep time-out */ |
75 | u32 tidx; /* toggle table index */ | 75 | u32 tidx; /* toggle table index */ |
76 | u32 chpd; /* current hotplug enable indicator */ | 76 | u32 chpd; /* current hotplug enable indicator */ |
77 | u32 clid; /* current lid state */ | 77 | u32 clid; /* current lid state */ |
78 | u32 cdck; /* current docking state */ | 78 | u32 cdck; /* current docking state */ |
79 | u32 sxsw; /* Sx state resume */ | 79 | u32 sxsw; /* Sx state resume */ |
80 | u32 evts; /* ASL supported events */ | 80 | u32 evts; /* ASL supported events */ |
81 | u32 cnot; /* current OS notification */ | 81 | u32 cnot; /* current OS notification */ |
82 | u32 nrdy; /* driver status */ | 82 | u32 nrdy; /* driver status */ |
83 | u8 rsvd2[60]; | 83 | u8 rsvd2[60]; |
84 | } __attribute__((packed)); | 84 | } __attribute__((packed)); |
85 | 85 | ||
86 | /* OpRegion mailbox #2: SWSCI */ | 86 | /* OpRegion mailbox #2: SWSCI */ |
87 | struct opregion_swsci { | 87 | struct opregion_swsci { |
88 | u32 scic; /* SWSCI command|status|data */ | 88 | u32 scic; /* SWSCI command|status|data */ |
89 | u32 parm; /* command parameters */ | 89 | u32 parm; /* command parameters */ |
90 | u32 dslp; /* driver sleep time-out */ | 90 | u32 dslp; /* driver sleep time-out */ |
91 | u8 rsvd[244]; | 91 | u8 rsvd[244]; |
92 | } __attribute__((packed)); | 92 | } __attribute__((packed)); |
93 | 93 | ||
94 | /* OpRegion mailbox #3: ASLE */ | 94 | /* OpRegion mailbox #3: ASLE */ |
95 | struct opregion_asle { | 95 | struct opregion_asle { |
96 | u32 ardy; /* driver readiness */ | 96 | u32 ardy; /* driver readiness */ |
97 | u32 aslc; /* ASLE interrupt command */ | 97 | u32 aslc; /* ASLE interrupt command */ |
98 | u32 tche; /* technology enabled indicator */ | 98 | u32 tche; /* technology enabled indicator */ |
99 | u32 alsi; /* current ALS illuminance reading */ | 99 | u32 alsi; /* current ALS illuminance reading */ |
100 | u32 bclp; /* backlight brightness to set */ | 100 | u32 bclp; /* backlight brightness to set */ |
101 | u32 pfit; /* panel fitting state */ | 101 | u32 pfit; /* panel fitting state */ |
102 | u32 cblv; /* current brightness level */ | 102 | u32 cblv; /* current brightness level */ |
103 | u16 bclm[20]; /* backlight level duty cycle mapping table */ | 103 | u16 bclm[20]; /* backlight level duty cycle mapping table */ |
104 | u32 cpfm; /* current panel fitting mode */ | 104 | u32 cpfm; /* current panel fitting mode */ |
105 | u32 epfm; /* enabled panel fitting modes */ | 105 | u32 epfm; /* enabled panel fitting modes */ |
106 | u8 plut[74]; /* panel LUT and identifier */ | 106 | u8 plut[74]; /* panel LUT and identifier */ |
107 | u32 pfmb; /* PWM freq and min brightness */ | 107 | u32 pfmb; /* PWM freq and min brightness */ |
108 | u8 rsvd[102]; | 108 | u8 rsvd[102]; |
109 | } __attribute__((packed)); | 109 | } __attribute__((packed)); |
110 | 110 | ||
111 | /* ASLE irq request bits */ | 111 | /* ASLE irq request bits */ |
@@ -361,7 +361,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
361 | 361 | ||
362 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { | 362 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { |
363 | if (i >= 8) { | 363 | if (i >= 8) { |
364 | dev_printk (KERN_ERR, &dev->pdev->dev, | 364 | dev_printk(KERN_ERR, &dev->pdev->dev, |
365 | "More than 8 outputs detected\n"); | 365 | "More than 8 outputs detected\n"); |
366 | return; | 366 | return; |
367 | } | 367 | } |
@@ -387,7 +387,7 @@ blind_set: | |||
387 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 387 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
388 | int output_type = ACPI_OTHER_OUTPUT; | 388 | int output_type = ACPI_OTHER_OUTPUT; |
389 | if (i >= 8) { | 389 | if (i >= 8) { |
390 | dev_printk (KERN_ERR, &dev->pdev->dev, | 390 | dev_printk(KERN_ERR, &dev->pdev->dev, |
391 | "More than 8 outputs detected\n"); | 391 | "More than 8 outputs detected\n"); |
392 | return; | 392 | return; |
393 | } | 393 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d36038086826..cdf17d4cc1f7 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -117,57 +117,57 @@ | |||
117 | 117 | ||
118 | /* memory buffered overlay registers */ | 118 | /* memory buffered overlay registers */ |
119 | struct overlay_registers { | 119 | struct overlay_registers { |
120 | u32 OBUF_0Y; | 120 | u32 OBUF_0Y; |
121 | u32 OBUF_1Y; | 121 | u32 OBUF_1Y; |
122 | u32 OBUF_0U; | 122 | u32 OBUF_0U; |
123 | u32 OBUF_0V; | 123 | u32 OBUF_0V; |
124 | u32 OBUF_1U; | 124 | u32 OBUF_1U; |
125 | u32 OBUF_1V; | 125 | u32 OBUF_1V; |
126 | u32 OSTRIDE; | 126 | u32 OSTRIDE; |
127 | u32 YRGB_VPH; | 127 | u32 YRGB_VPH; |
128 | u32 UV_VPH; | 128 | u32 UV_VPH; |
129 | u32 HORZ_PH; | 129 | u32 HORZ_PH; |
130 | u32 INIT_PHS; | 130 | u32 INIT_PHS; |
131 | u32 DWINPOS; | 131 | u32 DWINPOS; |
132 | u32 DWINSZ; | 132 | u32 DWINSZ; |
133 | u32 SWIDTH; | 133 | u32 SWIDTH; |
134 | u32 SWIDTHSW; | 134 | u32 SWIDTHSW; |
135 | u32 SHEIGHT; | 135 | u32 SHEIGHT; |
136 | u32 YRGBSCALE; | 136 | u32 YRGBSCALE; |
137 | u32 UVSCALE; | 137 | u32 UVSCALE; |
138 | u32 OCLRC0; | 138 | u32 OCLRC0; |
139 | u32 OCLRC1; | 139 | u32 OCLRC1; |
140 | u32 DCLRKV; | 140 | u32 DCLRKV; |
141 | u32 DCLRKM; | 141 | u32 DCLRKM; |
142 | u32 SCLRKVH; | 142 | u32 SCLRKVH; |
143 | u32 SCLRKVL; | 143 | u32 SCLRKVL; |
144 | u32 SCLRKEN; | 144 | u32 SCLRKEN; |
145 | u32 OCONFIG; | 145 | u32 OCONFIG; |
146 | u32 OCMD; | 146 | u32 OCMD; |
147 | u32 RESERVED1; /* 0x6C */ | 147 | u32 RESERVED1; /* 0x6C */ |
148 | u32 OSTART_0Y; | 148 | u32 OSTART_0Y; |
149 | u32 OSTART_1Y; | 149 | u32 OSTART_1Y; |
150 | u32 OSTART_0U; | 150 | u32 OSTART_0U; |
151 | u32 OSTART_0V; | 151 | u32 OSTART_0V; |
152 | u32 OSTART_1U; | 152 | u32 OSTART_1U; |
153 | u32 OSTART_1V; | 153 | u32 OSTART_1V; |
154 | u32 OTILEOFF_0Y; | 154 | u32 OTILEOFF_0Y; |
155 | u32 OTILEOFF_1Y; | 155 | u32 OTILEOFF_1Y; |
156 | u32 OTILEOFF_0U; | 156 | u32 OTILEOFF_0U; |
157 | u32 OTILEOFF_0V; | 157 | u32 OTILEOFF_0V; |
158 | u32 OTILEOFF_1U; | 158 | u32 OTILEOFF_1U; |
159 | u32 OTILEOFF_1V; | 159 | u32 OTILEOFF_1V; |
160 | u32 FASTHSCALE; /* 0xA0 */ | 160 | u32 FASTHSCALE; /* 0xA0 */ |
161 | u32 UVSCALEV; /* 0xA4 */ | 161 | u32 UVSCALEV; /* 0xA4 */ |
162 | u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ | 162 | u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ |
163 | u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ | 163 | u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ |
164 | u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; | 164 | u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; |
165 | u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ | 165 | u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ |
166 | u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; | 166 | u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; |
167 | u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ | 167 | u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ |
168 | u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; | 168 | u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; |
169 | u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ | 169 | u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ |
170 | u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; | 170 | u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; |
171 | }; | 171 | }; |
172 | 172 | ||
173 | struct intel_overlay { | 173 | struct intel_overlay { |
@@ -192,7 +192,7 @@ struct intel_overlay { | |||
192 | static struct overlay_registers * | 192 | static struct overlay_registers * |
193 | intel_overlay_map_regs(struct intel_overlay *overlay) | 193 | intel_overlay_map_regs(struct intel_overlay *overlay) |
194 | { | 194 | { |
195 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; | 195 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; |
196 | struct overlay_registers *regs; | 196 | struct overlay_registers *regs; |
197 | 197 | ||
198 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | 198 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
@@ -264,7 +264,7 @@ i830_activate_pipe_a(struct drm_device *dev) | |||
264 | 264 | ||
265 | mode = drm_mode_duplicate(dev, &vesa_640x480); | 265 | mode = drm_mode_duplicate(dev, &vesa_640x480); |
266 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 266 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
267 | if(!drm_crtc_helper_set_mode(&crtc->base, mode, | 267 | if (!drm_crtc_helper_set_mode(&crtc->base, mode, |
268 | crtc->base.x, crtc->base.y, | 268 | crtc->base.x, crtc->base.y, |
269 | crtc->base.fb)) | 269 | crtc->base.fb)) |
270 | return 0; | 270 | return 0; |
@@ -332,7 +332,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
332 | bool load_polyphase_filter) | 332 | bool load_polyphase_filter) |
333 | { | 333 | { |
334 | struct drm_device *dev = overlay->dev; | 334 | struct drm_device *dev = overlay->dev; |
335 | drm_i915_private_t *dev_priv = dev->dev_private; | 335 | drm_i915_private_t *dev_priv = dev->dev_private; |
336 | struct drm_i915_gem_request *request; | 336 | struct drm_i915_gem_request *request; |
337 | u32 flip_addr = overlay->flip_addr; | 337 | u32 flip_addr = overlay->flip_addr; |
338 | u32 tmp; | 338 | u32 tmp; |
@@ -359,7 +359,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
359 | } | 359 | } |
360 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 360 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
361 | OUT_RING(flip_addr); | 361 | OUT_RING(flip_addr); |
362 | ADVANCE_LP_RING(); | 362 | ADVANCE_LP_RING(); |
363 | 363 | ||
364 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); | 364 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); |
365 | if (ret) { | 365 | if (ret) { |
@@ -583,7 +583,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) | |||
583 | ret = ((offset + width + mask) >> shift) - (offset >> shift); | 583 | ret = ((offset + width + mask) >> shift) - (offset >> shift); |
584 | if (!IS_GEN2(dev)) | 584 | if (!IS_GEN2(dev)) |
585 | ret <<= 1; | 585 | ret <<= 1; |
586 | ret -=1; | 586 | ret -= 1; |
587 | return ret << 2; | 587 | return ret << 2; |
588 | } | 588 | } |
589 | 589 | ||
@@ -817,7 +817,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
817 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, | 817 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, |
818 | params->offset_Y, tmp_width); | 818 | params->offset_Y, tmp_width); |
819 | regs->SHEIGHT = params->src_h; | 819 | regs->SHEIGHT = params->src_h; |
820 | regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y; | 820 | regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y; |
821 | regs->OSTRIDE = params->stride_Y; | 821 | regs->OSTRIDE = params->stride_Y; |
822 | 822 | ||
823 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | 823 | if (params->format & I915_OVERLAY_YUV_PLANAR) { |
@@ -917,7 +917,7 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay) | |||
917 | * line with the intel documentation for the i965 | 917 | * line with the intel documentation for the i965 |
918 | */ | 918 | */ |
919 | if (INTEL_INFO(dev)->gen >= 4) { | 919 | if (INTEL_INFO(dev)->gen >= 4) { |
920 | /* on i965 use the PGM reg to read out the autoscaler values */ | 920 | /* on i965 use the PGM reg to read out the autoscaler values */ |
921 | ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; | 921 | ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; |
922 | } else { | 922 | } else { |
923 | if (pfit_control & VERT_AUTO_SCALE) | 923 | if (pfit_control & VERT_AUTO_SCALE) |
@@ -1098,7 +1098,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) | |||
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | int intel_overlay_put_image(struct drm_device *dev, void *data, | 1100 | int intel_overlay_put_image(struct drm_device *dev, void *data, |
1101 | struct drm_file *file_priv) | 1101 | struct drm_file *file_priv) |
1102 | { | 1102 | { |
1103 | struct drm_intel_overlay_put_image *put_image_rec = data; | 1103 | struct drm_intel_overlay_put_image *put_image_rec = data; |
1104 | drm_i915_private_t *dev_priv = dev->dev_private; | 1104 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -1301,10 +1301,10 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs) | |||
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | int intel_overlay_attrs(struct drm_device *dev, void *data, | 1303 | int intel_overlay_attrs(struct drm_device *dev, void *data, |
1304 | struct drm_file *file_priv) | 1304 | struct drm_file *file_priv) |
1305 | { | 1305 | { |
1306 | struct drm_intel_overlay_attrs *attrs = data; | 1306 | struct drm_intel_overlay_attrs *attrs = data; |
1307 | drm_i915_private_t *dev_priv = dev->dev_private; | 1307 | drm_i915_private_t *dev_priv = dev->dev_private; |
1308 | struct intel_overlay *overlay; | 1308 | struct intel_overlay *overlay; |
1309 | struct overlay_registers *regs; | 1309 | struct overlay_registers *regs; |
1310 | int ret; | 1310 | int ret; |
@@ -1393,7 +1393,7 @@ out_unlock: | |||
1393 | 1393 | ||
1394 | void intel_setup_overlay(struct drm_device *dev) | 1394 | void intel_setup_overlay(struct drm_device *dev) |
1395 | { | 1395 | { |
1396 | drm_i915_private_t *dev_priv = dev->dev_private; | 1396 | drm_i915_private_t *dev_priv = dev->dev_private; |
1397 | struct intel_overlay *overlay; | 1397 | struct intel_overlay *overlay; |
1398 | struct drm_i915_gem_object *reg_bo; | 1398 | struct drm_i915_gem_object *reg_bo; |
1399 | struct overlay_registers *regs; | 1399 | struct overlay_registers *regs; |
@@ -1421,24 +1421,24 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1421 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1421 | ret = i915_gem_attach_phys_object(dev, reg_bo, |
1422 | I915_GEM_PHYS_OVERLAY_REGS, | 1422 | I915_GEM_PHYS_OVERLAY_REGS, |
1423 | PAGE_SIZE); | 1423 | PAGE_SIZE); |
1424 | if (ret) { | 1424 | if (ret) { |
1425 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1425 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1426 | goto out_free_bo; | 1426 | goto out_free_bo; |
1427 | } | 1427 | } |
1428 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; | 1428 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; |
1429 | } else { | 1429 | } else { |
1430 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); | 1430 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); |
1431 | if (ret) { | 1431 | if (ret) { |
1432 | DRM_ERROR("failed to pin overlay register bo\n"); | 1432 | DRM_ERROR("failed to pin overlay register bo\n"); |
1433 | goto out_free_bo; | 1433 | goto out_free_bo; |
1434 | } | 1434 | } |
1435 | overlay->flip_addr = reg_bo->gtt_offset; | 1435 | overlay->flip_addr = reg_bo->gtt_offset; |
1436 | 1436 | ||
1437 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); | 1437 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); |
1438 | if (ret) { | 1438 | if (ret) { |
1439 | DRM_ERROR("failed to move overlay register bo into the GTT\n"); | 1439 | DRM_ERROR("failed to move overlay register bo into the GTT\n"); |
1440 | goto out_unpin_bo; | 1440 | goto out_unpin_bo; |
1441 | } | 1441 | } |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | /* init all values */ | 1444 | /* init all values */ |
@@ -1525,7 +1525,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, | |||
1525 | struct intel_overlay_error_state * | 1525 | struct intel_overlay_error_state * |
1526 | intel_overlay_capture_error_state(struct drm_device *dev) | 1526 | intel_overlay_capture_error_state(struct drm_device *dev) |
1527 | { | 1527 | { |
1528 | drm_i915_private_t *dev_priv = dev->dev_private; | 1528 | drm_i915_private_t *dev_priv = dev->dev_private; |
1529 | struct intel_overlay *overlay = dev_priv->overlay; | 1529 | struct intel_overlay *overlay = dev_priv->overlay; |
1530 | struct intel_overlay_error_state *error; | 1530 | struct intel_overlay_error_state *error; |
1531 | struct overlay_registers __iomem *regs; | 1531 | struct overlay_registers __iomem *regs; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a9e0c7bcd317..499d4c0dbeeb 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -84,7 +84,7 @@ intel_pch_panel_fitting(struct drm_device *dev, | |||
84 | if (scaled_width > scaled_height) { /* pillar */ | 84 | if (scaled_width > scaled_height) { /* pillar */ |
85 | width = scaled_height / mode->vdisplay; | 85 | width = scaled_height / mode->vdisplay; |
86 | if (width & 1) | 86 | if (width & 1) |
87 | width++; | 87 | width++; |
88 | x = (adjusted_mode->hdisplay - width + 1) / 2; | 88 | x = (adjusted_mode->hdisplay - width + 1) / 2; |
89 | y = 0; | 89 | y = 0; |
90 | height = adjusted_mode->vdisplay; | 90 | height = adjusted_mode->vdisplay; |
@@ -206,7 +206,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev) | |||
206 | if (IS_PINEVIEW(dev)) | 206 | if (IS_PINEVIEW(dev)) |
207 | val >>= 1; | 207 | val >>= 1; |
208 | 208 | ||
209 | if (is_backlight_combination_mode(dev)){ | 209 | if (is_backlight_combination_mode(dev)) { |
210 | u8 lbpc; | 210 | u8 lbpc; |
211 | 211 | ||
212 | val &= ~1; | 212 | val &= ~1; |
@@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) | |||
226 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); | 226 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); |
227 | } | 227 | } |
228 | 228 | ||
229 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) | 229 | static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) |
230 | { | 230 | { |
231 | struct drm_i915_private *dev_priv = dev->dev_private; | 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
232 | u32 tmp; | 232 | u32 tmp; |
@@ -236,7 +236,7 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
236 | if (HAS_PCH_SPLIT(dev)) | 236 | if (HAS_PCH_SPLIT(dev)) |
237 | return intel_pch_panel_set_backlight(dev, level); | 237 | return intel_pch_panel_set_backlight(dev, level); |
238 | 238 | ||
239 | if (is_backlight_combination_mode(dev)){ | 239 | if (is_backlight_combination_mode(dev)) { |
240 | u32 max = intel_panel_get_max_backlight(dev); | 240 | u32 max = intel_panel_get_max_backlight(dev); |
241 | u8 lbpc; | 241 | u8 lbpc; |
242 | 242 | ||
@@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
254 | I915_WRITE(BLC_PWM_CTL, tmp | level); | 254 | I915_WRITE(BLC_PWM_CTL, tmp | level); |
255 | } | 255 | } |
256 | 256 | ||
257 | void intel_panel_disable_backlight(struct drm_device *dev) | 257 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) |
258 | { | 258 | { |
259 | struct drm_i915_private *dev_priv = dev->dev_private; | 259 | struct drm_i915_private *dev_priv = dev->dev_private; |
260 | 260 | ||
261 | if (dev_priv->backlight_enabled) { | 261 | dev_priv->backlight_level = level; |
262 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | 262 | if (dev_priv->backlight_enabled) |
263 | dev_priv->backlight_enabled = false; | 263 | intel_panel_actually_set_backlight(dev, level); |
264 | } | 264 | } |
265 | |||
266 | void intel_panel_disable_backlight(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
265 | 269 | ||
266 | intel_panel_set_backlight(dev, 0); | 270 | dev_priv->backlight_enabled = false; |
271 | intel_panel_actually_set_backlight(dev, 0); | ||
267 | } | 272 | } |
268 | 273 | ||
269 | void intel_panel_enable_backlight(struct drm_device *dev) | 274 | void intel_panel_enable_backlight(struct drm_device *dev) |
@@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev) | |||
273 | if (dev_priv->backlight_level == 0) | 278 | if (dev_priv->backlight_level == 0) |
274 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | 279 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); |
275 | 280 | ||
276 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
277 | dev_priv->backlight_enabled = true; | 281 | dev_priv->backlight_enabled = true; |
282 | intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); | ||
278 | } | 283 | } |
279 | 284 | ||
280 | static void intel_panel_init_backlight(struct drm_device *dev) | 285 | static void intel_panel_init_backlight(struct drm_device *dev) |
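The reshuffle separates policy from hardware access: intel_panel_set_backlight now always caches the requested level and defers to the new intel_panel_actually_set_backlight only while the backlight is enabled, so disable can blank the panel without losing the level and enable can restore it. A compact standalone model of that state machine (hw_write stands in for the PWM register write):

    #include <stdbool.h>

    static void hw_write(unsigned int level) { /* program PWM duty cycle */ }

    struct backlight {
            bool enabled;
            unsigned int level;   /* last requested level, always cached */
    };

    static void bl_set(struct backlight *bl, unsigned int level)
    {
            bl->level = level;            /* remember even while disabled */
            if (bl->enabled)
                    hw_write(level);
    }

    static void bl_disable(struct backlight *bl)
    {
            bl->enabled = false;
            hw_write(0);                  /* blank, keep bl->level intact */
    }

    static void bl_enable(struct backlight *bl)
    {
            bl->enabled = true;
            hw_write(bl->level);          /* restore the cached level */
    }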
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c30626ea9f93..ca70e2f10445 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -34,6 +34,16 @@ | |||
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | 36 | ||
37 | /* | ||
38 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
39 | * over cache flushing. | ||
40 | */ | ||
41 | struct pipe_control { | ||
42 | struct drm_i915_gem_object *obj; | ||
43 | volatile u32 *cpu_page; | ||
44 | u32 gtt_offset; | ||
45 | }; | ||
46 | |||
37 | static inline int ring_space(struct intel_ring_buffer *ring) | 47 | static inline int ring_space(struct intel_ring_buffer *ring) |
38 | { | 48 | { |
39 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); | 49 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); |
@@ -123,6 +133,118 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
123 | return 0; | 133 | return 0; |
124 | } | 134 | } |
125 | 135 | ||
136 | /** | ||
137 | * Emits a PIPE_CONTROL with a non-zero post-sync operation, for | ||
138 | * implementing two workarounds on gen6. From section 1.4.7.1 | ||
139 | * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: | ||
140 | * | ||
141 | * [DevSNB-C+{W/A}] Before any depth stall flush (including those | ||
142 | * produced by non-pipelined state commands), software needs to first | ||
143 | * send a PIPE_CONTROL with no bits set except Post-Sync Operation != | ||
144 | * 0. | ||
145 | * | ||
146 | * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable | ||
147 | * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. | ||
148 | * | ||
149 | * And the workaround for these two requires this workaround first: | ||
150 | * | ||
151 | * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent | ||
152 | * BEFORE the pipe-control with a post-sync op and no write-cache | ||
153 | * flushes. | ||
154 | * | ||
155 | * And this last workaround is tricky because of the requirements on | ||
156 | * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM | ||
157 | * volume 2 part 1: | ||
158 | * | ||
159 | * "1 of the following must also be set: | ||
160 | * - Render Target Cache Flush Enable ([12] of DW1) | ||
161 | * - Depth Cache Flush Enable ([0] of DW1) | ||
162 | * - Stall at Pixel Scoreboard ([1] of DW1) | ||
163 | * - Depth Stall ([13] of DW1) | ||
164 | * - Post-Sync Operation ([13] of DW1) | ||
165 | * - Notify Enable ([8] of DW1)" | ||
166 | * | ||
167 | * The cache flushes require the workaround flush that triggered this | ||
168 | * one, so we can't use it. Depth stall would trigger the same. | ||
169 | * Post-sync nonzero is what triggered this second workaround, so we | ||
170 | * can't use that one either. Notify enable is IRQs, which aren't | ||
171 | * really our business. That leaves only stall at scoreboard. | ||
172 | */ | ||
173 | static int | ||
174 | intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) | ||
175 | { | ||
176 | struct pipe_control *pc = ring->private; | ||
177 | u32 scratch_addr = pc->gtt_offset + 128; | ||
178 | int ret; | ||
179 | |||
180 | |||
181 | ret = intel_ring_begin(ring, 6); | ||
182 | if (ret) | ||
183 | return ret; | ||
184 | |||
185 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | ||
186 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | ||
187 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | ||
188 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | ||
189 | intel_ring_emit(ring, 0); /* low dword */ | ||
190 | intel_ring_emit(ring, 0); /* high dword */ | ||
191 | intel_ring_emit(ring, MI_NOOP); | ||
192 | intel_ring_advance(ring); | ||
193 | |||
194 | ret = intel_ring_begin(ring, 6); | ||
195 | if (ret) | ||
196 | return ret; | ||
197 | |||
198 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | ||
199 | intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); | ||
200 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | ||
201 | intel_ring_emit(ring, 0); | ||
202 | intel_ring_emit(ring, 0); | ||
203 | intel_ring_emit(ring, MI_NOOP); | ||
204 | intel_ring_advance(ring); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int | ||
210 | gen6_render_ring_flush(struct intel_ring_buffer *ring, | ||
211 | u32 invalidate_domains, u32 flush_domains) | ||
212 | { | ||
213 | u32 flags = 0; | ||
214 | struct pipe_control *pc = ring->private; | ||
215 | u32 scratch_addr = pc->gtt_offset + 128; | ||
216 | int ret; | ||
217 | |||
218 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | ||
219 | intel_emit_post_sync_nonzero_flush(ring); | ||
220 | |||
221 | /* Just flush everything. Experiments have shown that reducing the | ||
222 | * number of bits based on the write domains has little performance | ||
223 | * impact. | ||
224 | */ | ||
225 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | ||
226 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | ||
227 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | ||
228 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | ||
229 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | ||
230 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | ||
231 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | ||
232 | |||
233 | ret = intel_ring_begin(ring, 6); | ||
234 | if (ret) | ||
235 | return ret; | ||
236 | |||
237 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | ||
238 | intel_ring_emit(ring, flags); | ||
239 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
240 | intel_ring_emit(ring, 0); /* lower dword */ | ||
241 | intel_ring_emit(ring, 0); /* upper dword */ | ||
242 | intel_ring_emit(ring, MI_NOOP); | ||
243 | intel_ring_advance(ring); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
126 | static void ring_write_tail(struct intel_ring_buffer *ring, | 248 | static void ring_write_tail(struct intel_ring_buffer *ring, |
127 | u32 value) | 249 | u32 value) |
128 | { | 250 | { |
@@ -206,16 +328,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
206 | return 0; | 328 | return 0; |
207 | } | 329 | } |
208 | 330 | ||
209 | /* | ||
210 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
211 | * over cache flushing. | ||
212 | */ | ||
213 | struct pipe_control { | ||
214 | struct drm_i915_gem_object *obj; | ||
215 | volatile u32 *cpu_page; | ||
216 | u32 gtt_offset; | ||
217 | }; | ||
218 | |||
219 | static int | 331 | static int |
220 | init_pipe_control(struct intel_ring_buffer *ring) | 332 | init_pipe_control(struct intel_ring_buffer *ring) |
221 | { | 333 | { |
@@ -296,8 +408,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
296 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); | 408 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); |
297 | } | 409 | } |
298 | 410 | ||
299 | if (INTEL_INFO(dev)->gen >= 6) { | 411 | if (INTEL_INFO(dev)->gen >= 5) { |
300 | } else if (IS_GEN5(dev)) { | ||
301 | ret = init_pipe_control(ring); | 412 | ret = init_pipe_control(ring); |
302 | if (ret) | 413 | if (ret) |
303 | return ret; | 414 | return ret; |
@@ -315,83 +426,131 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) | |||
315 | } | 426 | } |
316 | 427 | ||
317 | static void | 428 | static void |
318 | update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) | 429 | update_mboxes(struct intel_ring_buffer *ring, |
430 | u32 seqno, | ||
431 | u32 mmio_offset) | ||
319 | { | 432 | { |
320 | struct drm_device *dev = ring->dev; | 433 | intel_ring_emit(ring, MI_SEMAPHORE_MBOX | |
321 | struct drm_i915_private *dev_priv = dev->dev_private; | 434 | MI_SEMAPHORE_GLOBAL_GTT | |
322 | int id; | 435 | MI_SEMAPHORE_REGISTER | |
323 | 436 | MI_SEMAPHORE_UPDATE); | |
324 | /* | ||
325 | * cs -> 1 = vcs, 0 = bcs | ||
326 | * vcs -> 1 = bcs, 0 = cs, | ||
327 | * bcs -> 1 = cs, 0 = vcs. | ||
328 | */ | ||
329 | id = ring - dev_priv->ring; | ||
330 | id += 2 - i; | ||
331 | id %= 3; | ||
332 | |||
333 | intel_ring_emit(ring, | ||
334 | MI_SEMAPHORE_MBOX | | ||
335 | MI_SEMAPHORE_REGISTER | | ||
336 | MI_SEMAPHORE_UPDATE); | ||
337 | intel_ring_emit(ring, seqno); | 437 | intel_ring_emit(ring, seqno); |
338 | intel_ring_emit(ring, | 438 | intel_ring_emit(ring, mmio_offset); |
339 | RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i); | ||
340 | } | 439 | } |
341 | 440 | ||
441 | /** | ||
442 | * gen6_add_request - Update the semaphore mailbox registers | ||
443 | * | ||
444 | * @ring - ring that is adding a request | ||
445 | * @seqno - return seqno stuck into the ring | ||
446 | * | ||
447 | * Update the mailbox registers in the *other* rings with the current seqno. | ||
448 | * This acts like a signal in the canonical semaphore. | ||
449 | */ | ||
342 | static int | 450 | static int |
343 | gen6_add_request(struct intel_ring_buffer *ring, | 451 | gen6_add_request(struct intel_ring_buffer *ring, |
344 | u32 *result) | 452 | u32 *seqno) |
345 | { | 453 | { |
346 | u32 seqno; | 454 | u32 mbox1_reg; |
455 | u32 mbox2_reg; | ||
347 | int ret; | 456 | int ret; |
348 | 457 | ||
349 | ret = intel_ring_begin(ring, 10); | 458 | ret = intel_ring_begin(ring, 10); |
350 | if (ret) | 459 | if (ret) |
351 | return ret; | 460 | return ret; |
352 | 461 | ||
353 | seqno = i915_gem_get_seqno(ring->dev); | 462 | mbox1_reg = ring->signal_mbox[0]; |
354 | update_semaphore(ring, 0, seqno); | 463 | mbox2_reg = ring->signal_mbox[1]; |
355 | update_semaphore(ring, 1, seqno); | ||
356 | 464 | ||
465 | *seqno = i915_gem_get_seqno(ring->dev); | ||
466 | |||
467 | update_mboxes(ring, *seqno, mbox1_reg); | ||
468 | update_mboxes(ring, *seqno, mbox2_reg); | ||
357 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 469 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
358 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 470 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
359 | intel_ring_emit(ring, seqno); | 471 | intel_ring_emit(ring, *seqno); |
360 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 472 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
361 | intel_ring_advance(ring); | 473 | intel_ring_advance(ring); |
362 | 474 | ||
363 | *result = seqno; | ||
364 | return 0; | 475 | return 0; |
365 | } | 476 | } |
366 | 477 | ||
367 | int | 478 | /** |
368 | intel_ring_sync(struct intel_ring_buffer *ring, | 479 | * intel_ring_sync - sync the waiter to the signaller on seqno |
369 | struct intel_ring_buffer *to, | 480 | * |
481 | * @waiter - ring that is waiting | ||
482 | * @signaller - ring which has, or will signal | ||
483 | * @seqno - seqno which the waiter will block on | ||
484 | */ | ||
485 | static int | ||
486 | intel_ring_sync(struct intel_ring_buffer *waiter, | ||
487 | struct intel_ring_buffer *signaller, | ||
488 | int ring, | ||
370 | u32 seqno) | 489 | u32 seqno) |
371 | { | 490 | { |
372 | int ret; | 491 | int ret; |
492 | u32 dw1 = MI_SEMAPHORE_MBOX | | ||
493 | MI_SEMAPHORE_COMPARE | | ||
494 | MI_SEMAPHORE_REGISTER; | ||
373 | 495 | ||
374 | ret = intel_ring_begin(ring, 4); | 496 | ret = intel_ring_begin(waiter, 4); |
375 | if (ret) | 497 | if (ret) |
376 | return ret; | 498 | return ret; |
377 | 499 | ||
378 | intel_ring_emit(ring, | 500 | intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]); |
379 | MI_SEMAPHORE_MBOX | | 501 | intel_ring_emit(waiter, seqno); |
380 | MI_SEMAPHORE_REGISTER | | 502 | intel_ring_emit(waiter, 0); |
381 | intel_ring_sync_index(ring, to) << 17 | | 503 | intel_ring_emit(waiter, MI_NOOP); |
382 | MI_SEMAPHORE_COMPARE); | 504 | intel_ring_advance(waiter); |
383 | intel_ring_emit(ring, seqno); | ||
384 | intel_ring_emit(ring, 0); | ||
385 | intel_ring_emit(ring, MI_NOOP); | ||
386 | intel_ring_advance(ring); | ||
387 | 505 | ||
388 | return 0; | 506 | return 0; |
389 | } | 507 | } |
390 | 508 | ||
509 | /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */ | ||
510 | int | ||
511 | render_ring_sync_to(struct intel_ring_buffer *waiter, | ||
512 | struct intel_ring_buffer *signaller, | ||
513 | u32 seqno) | ||
514 | { | ||
515 | WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID); | ||
516 | return intel_ring_sync(waiter, | ||
517 | signaller, | ||
518 | RCS, | ||
519 | seqno); | ||
520 | } | ||
521 | |||
522 | /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */ | ||
523 | int | ||
524 | gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter, | ||
525 | struct intel_ring_buffer *signaller, | ||
526 | u32 seqno) | ||
527 | { | ||
528 | WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID); | ||
529 | return intel_ring_sync(waiter, | ||
530 | signaller, | ||
531 | VCS, | ||
532 | seqno); | ||
533 | } | ||
534 | |||
535 | /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */ | ||
536 | int | ||
537 | gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, | ||
538 | struct intel_ring_buffer *signaller, | ||
539 | u32 seqno) | ||
540 | { | ||
541 | WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID); | ||
542 | return intel_ring_sync(waiter, | ||
543 | signaller, | ||
544 | BCS, | ||
545 | seqno); | ||
546 | } | ||
547 | |||
548 | |||
549 | |||
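Each wrapper above pins the waiter's engine id (RCS, VCS or BCS) so intel_ring_sync can index the signaller's semaphore_register table, and WARNs when the pairing is invalid (a ring never syncs to itself). A hedged call-site sketch; the ring-array indexing follows the enum in intel_ringbuffer.h, and the surrounding variables are placeholders:

    /* Make the blitter wait until the render ring has passed `seqno`. */
    struct intel_ring_buffer *waiter = &dev_priv->ring[BCS];
    struct intel_ring_buffer *signaller = &dev_priv->ring[RCS];
    int ret;

    ret = waiter->sync_to(waiter, signaller, seqno);
    if (ret)
            return ret;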
391 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | 550 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ |
392 | do { \ | 551 | do { \ |
393 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | 552 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ |
394 | PIPE_CONTROL_DEPTH_STALL | 2); \ | 553 | PIPE_CONTROL_DEPTH_STALL); \ |
395 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ | 554 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ |
396 | intel_ring_emit(ring__, 0); \ | 555 | intel_ring_emit(ring__, 0); \ |
397 | intel_ring_emit(ring__, 0); \ | 556 | intel_ring_emit(ring__, 0); \ |
@@ -419,8 +578,9 @@ pc_render_add_request(struct intel_ring_buffer *ring, | |||
419 | if (ret) | 578 | if (ret) |
420 | return ret; | 579 | return ret; |
421 | 580 | ||
422 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | 581 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
423 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | 582 | PIPE_CONTROL_WRITE_FLUSH | |
583 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | ||
424 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 584 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
425 | intel_ring_emit(ring, seqno); | 585 | intel_ring_emit(ring, seqno); |
426 | intel_ring_emit(ring, 0); | 586 | intel_ring_emit(ring, 0); |
@@ -435,8 +595,9 @@ pc_render_add_request(struct intel_ring_buffer *ring, | |||
435 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 595 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
436 | scratch_addr += 128; | 596 | scratch_addr += 128; |
437 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 597 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
438 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | 598 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
439 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | 599 | PIPE_CONTROL_WRITE_FLUSH | |
600 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | ||
440 | PIPE_CONTROL_NOTIFY); | 601 | PIPE_CONTROL_NOTIFY); |
441 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 602 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
442 | intel_ring_emit(ring, seqno); | 603 | intel_ring_emit(ring, seqno); |
@@ -1026,7 +1187,12 @@ static const struct intel_ring_buffer render_ring = { | |||
1026 | .irq_get = render_ring_get_irq, | 1187 | .irq_get = render_ring_get_irq, |
1027 | .irq_put = render_ring_put_irq, | 1188 | .irq_put = render_ring_put_irq, |
1028 | .dispatch_execbuffer = render_ring_dispatch_execbuffer, | 1189 | .dispatch_execbuffer = render_ring_dispatch_execbuffer, |
1029 | .cleanup = render_ring_cleanup, | 1190 | .cleanup = render_ring_cleanup, |
1191 | .sync_to = render_ring_sync_to, | ||
1192 | .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID, | ||
1193 | MI_SEMAPHORE_SYNC_RV, | ||
1194 | MI_SEMAPHORE_SYNC_RB}, | ||
1195 | .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC}, | ||
1030 | }; | 1196 | }; |
1031 | 1197 | ||
1032 | /* ring buffer for bit-stream decoder */ | 1198 | /* ring buffer for bit-stream decoder */ |
@@ -1050,23 +1216,23 @@ static const struct intel_ring_buffer bsd_ring = { | |||
1050 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | 1216 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, |
1051 | u32 value) | 1217 | u32 value) |
1052 | { | 1218 | { |
1053 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1219 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1054 | 1220 | ||
1055 | /* Every tail move must follow the sequence below */ | 1221 | /* Every tail move must follow the sequence below */ |
1056 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | 1222 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
1057 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | | 1223 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | |
1058 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); | 1224 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); |
1059 | I915_WRITE(GEN6_BSD_RNCID, 0x0); | 1225 | I915_WRITE(GEN6_BSD_RNCID, 0x0); |
1060 | 1226 | ||
1061 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & | 1227 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & |
1062 | GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, | 1228 | GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, |
1063 | 50)) | 1229 | 50)) |
1064 | DRM_ERROR("timed out waiting for IDLE Indicator\n"); | 1230 | DRM_ERROR("timed out waiting for IDLE Indicator\n"); |
1065 | 1231 | ||
1066 | I915_WRITE_TAIL(ring, value); | 1232 | I915_WRITE_TAIL(ring, value); |
1067 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | 1233 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
1068 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | | 1234 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | |
1069 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | 1235 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
1070 | } | 1236 | } |
1071 | 1237 | ||
1072 | static int gen6_ring_flush(struct intel_ring_buffer *ring, | 1238 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
@@ -1094,18 +1260,18 @@ static int | |||
1094 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1260 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1095 | u32 offset, u32 len) | 1261 | u32 offset, u32 len) |
1096 | { | 1262 | { |
1097 | int ret; | 1263 | int ret; |
1098 | 1264 | ||
1099 | ret = intel_ring_begin(ring, 2); | 1265 | ret = intel_ring_begin(ring, 2); |
1100 | if (ret) | 1266 | if (ret) |
1101 | return ret; | 1267 | return ret; |
1102 | 1268 | ||
1103 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); | 1269 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
1104 | /* bit0-7 is the length on GEN6+ */ | 1270 | /* bit0-7 is the length on GEN6+ */ |
1105 | intel_ring_emit(ring, offset); | 1271 | intel_ring_emit(ring, offset); |
1106 | intel_ring_advance(ring); | 1272 | intel_ring_advance(ring); |
1107 | 1273 | ||
1108 | return 0; | 1274 | return 0; |
1109 | } | 1275 | } |
1110 | 1276 | ||
1111 | static bool | 1277 | static bool |
@@ -1154,6 +1320,11 @@ static const struct intel_ring_buffer gen6_bsd_ring = { | |||
1154 | .irq_get = gen6_bsd_ring_get_irq, | 1320 | .irq_get = gen6_bsd_ring_get_irq, |
1155 | .irq_put = gen6_bsd_ring_put_irq, | 1321 | .irq_put = gen6_bsd_ring_put_irq, |
1156 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, | 1322 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1323 | .sync_to = gen6_bsd_ring_sync_to, | ||
1324 | .semaphore_register = {MI_SEMAPHORE_SYNC_VR, | ||
1325 | MI_SEMAPHORE_SYNC_INVALID, | ||
1326 | MI_SEMAPHORE_SYNC_VB}, | ||
1327 | .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC}, | ||
1157 | }; | 1328 | }; |
1158 | 1329 | ||
1159 | /* Blitter support (SandyBridge+) */ | 1330 | /* Blitter support (SandyBridge+) */ |
@@ -1272,19 +1443,24 @@ static void blt_ring_cleanup(struct intel_ring_buffer *ring) | |||
1272 | } | 1443 | } |
1273 | 1444 | ||
1274 | static const struct intel_ring_buffer gen6_blt_ring = { | 1445 | static const struct intel_ring_buffer gen6_blt_ring = { |
1275 | .name = "blt ring", | 1446 | .name = "blt ring", |
1276 | .id = RING_BLT, | 1447 | .id = RING_BLT, |
1277 | .mmio_base = BLT_RING_BASE, | 1448 | .mmio_base = BLT_RING_BASE, |
1278 | .size = 32 * PAGE_SIZE, | 1449 | .size = 32 * PAGE_SIZE, |
1279 | .init = blt_ring_init, | 1450 | .init = blt_ring_init, |
1280 | .write_tail = ring_write_tail, | 1451 | .write_tail = ring_write_tail, |
1281 | .flush = blt_ring_flush, | 1452 | .flush = blt_ring_flush, |
1282 | .add_request = gen6_add_request, | 1453 | .add_request = gen6_add_request, |
1283 | .get_seqno = ring_get_seqno, | 1454 | .get_seqno = ring_get_seqno, |
1284 | .irq_get = blt_ring_get_irq, | 1455 | .irq_get = blt_ring_get_irq, |
1285 | .irq_put = blt_ring_put_irq, | 1456 | .irq_put = blt_ring_put_irq, |
1286 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, | 1457 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1287 | .cleanup = blt_ring_cleanup, | 1458 | .cleanup = blt_ring_cleanup, |
1459 | .sync_to = gen6_blt_ring_sync_to, | ||
1460 | .semaphore_register = {MI_SEMAPHORE_SYNC_BR, | ||
1461 | MI_SEMAPHORE_SYNC_BV, | ||
1462 | MI_SEMAPHORE_SYNC_INVALID}, | ||
1463 | .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC}, | ||
1288 | }; | 1464 | }; |
1289 | 1465 | ||
1290 | int intel_init_render_ring_buffer(struct drm_device *dev) | 1466 | int intel_init_render_ring_buffer(struct drm_device *dev) |
@@ -1295,6 +1471,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1295 | *ring = render_ring; | 1471 | *ring = render_ring; |
1296 | if (INTEL_INFO(dev)->gen >= 6) { | 1472 | if (INTEL_INFO(dev)->gen >= 6) { |
1297 | ring->add_request = gen6_add_request; | 1473 | ring->add_request = gen6_add_request; |
1474 | ring->flush = gen6_render_ring_flush; | ||
1298 | ring->irq_get = gen6_render_ring_get_irq; | 1475 | ring->irq_get = gen6_render_ring_get_irq; |
1299 | ring->irq_put = gen6_render_ring_put_irq; | 1476 | ring->irq_put = gen6_render_ring_put_irq; |
1300 | } else if (IS_GEN5(dev)) { | 1477 | } else if (IS_GEN5(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 39ac2b634ae5..68281c96c558 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -2,10 +2,10 @@ | |||
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ |
3 | 3 | ||
4 | enum { | 4 | enum { |
5 | RCS = 0x0, | 5 | RCS = 0x0, |
6 | VCS, | 6 | VCS, |
7 | BCS, | 7 | BCS, |
8 | I915_NUM_RINGS, | 8 | I915_NUM_RINGS, |
9 | }; | 9 | }; |
10 | 10 | ||
11 | struct intel_hw_status_page { | 11 | struct intel_hw_status_page { |
@@ -75,7 +75,12 @@ struct intel_ring_buffer { | |||
75 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | 75 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
76 | u32 offset, u32 length); | 76 | u32 offset, u32 length); |
77 | void (*cleanup)(struct intel_ring_buffer *ring); | 77 | void (*cleanup)(struct intel_ring_buffer *ring); |
78 | int (*sync_to)(struct intel_ring_buffer *ring, | ||
79 | struct intel_ring_buffer *to, | ||
80 | u32 seqno); | ||
78 | 81 | ||
82 | u32 semaphore_register[3]; /* our mbox written by others */ | ||

83 | u32 signal_mbox[2]; /* mboxes this ring signals to */ | ||
79 | /** | 84 | /** |
80 | * List of objects currently involved in rendering from the | 85 | * List of objects currently involved in rendering from the |
81 | * ringbuffer. | 86 | * ringbuffer. |
@@ -180,9 +185,6 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, | |||
180 | void intel_ring_advance(struct intel_ring_buffer *ring); | 185 | void intel_ring_advance(struct intel_ring_buffer *ring); |
181 | 186 | ||
182 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); | 187 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
183 | int intel_ring_sync(struct intel_ring_buffer *ring, | ||
184 | struct intel_ring_buffer *to, | ||
185 | u32 seqno); | ||
186 | 188 | ||
187 | int intel_init_render_ring_buffer(struct drm_device *dev); | 189 | int intel_init_render_ring_buffer(struct drm_device *dev); |
188 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | 190 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
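
With sync_to now a per-ring operation and the global intel_ring_sync() declaration removed, callers dispatch through the ring vtable. A sketch of the typical call site in the execbuffer path, assuming the object tracks the ring and seqno of its last write (field names as in this era's drm_i915_gem_object, used here illustratively):

    /* Before ring 'to' reads 'obj', make it wait until the ring that
     * last wrote the object has passed the object's seqno.  Same-ring
     * access needs no semaphore; that slot in semaphore_register[] is
     * MI_SEMAPHORE_SYNC_INVALID anyway. */
    static int
    sync_obj_to_ring(struct drm_i915_gem_object *obj,
                     struct intel_ring_buffer *to)
    {
            struct intel_ring_buffer *from = obj->ring;

            if (from == NULL || from == to)
                    return 0;

            return to->sync_to(to, from, obj->last_rendering_seqno);
    }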
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 6348c499616f..6db3b1ccb6eb 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -43,7 +43,7 @@ | |||
43 | #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) | 43 | #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) |
44 | 44 | ||
45 | #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ | 45 | #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ |
46 | SDVO_TV_MASK) | 46 | SDVO_TV_MASK) |
47 | 47 | ||
48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) | 48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) |
49 | #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) | 49 | #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) |
@@ -288,117 +288,117 @@ static const struct _sdvo_cmd_name { | |||
288 | u8 cmd; | 288 | u8 cmd; |
289 | const char *name; | 289 | const char *name; |
290 | } sdvo_cmd_names[] = { | 290 | } sdvo_cmd_names[] = { |
291 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), | 291 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), |
292 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), | 292 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), |
293 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), | 293 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), |
294 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), | 294 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), |
295 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), | 295 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), |
296 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), | 296 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), |
297 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), | 297 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), |
298 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), | 298 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), |
299 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), | 299 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), |
300 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), | 300 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), |
301 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), | 301 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), |
302 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), | 302 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), |
303 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), | 303 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), |
304 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), | 304 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), |
305 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), | 305 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), |
306 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), | 306 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), |
307 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), | 307 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), |
308 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), | 308 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), |
309 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), | 309 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), |
310 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), | 310 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), |
311 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), | 311 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), |
312 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), | 312 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), |
313 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), | 313 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), |
314 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), | 314 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), |
315 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), | 315 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), |
316 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), | 316 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), |
317 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), | 317 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), |
318 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), | 318 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), |
319 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), | 319 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), |
320 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), | 320 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), |
321 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), | 321 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), |
322 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), | 322 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), |
323 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), | 323 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), |
324 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), | 324 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), |
325 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), | 325 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), |
326 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), | 326 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), |
327 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), | 327 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), |
328 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), | 328 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), |
329 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), | 329 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), |
330 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), | 330 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), |
331 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), | 331 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), |
332 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), | 332 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), |
333 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), | 333 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), |
334 | 334 | ||
335 | /* Add the op code for SDVO enhancements */ | 335 | /* Add the op code for SDVO enhancements */ |
336 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), | 336 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), |
337 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), | 337 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), |
338 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), | 338 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), |
339 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), | 339 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), |
340 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), | 340 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), |
341 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), | 341 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), |
342 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), | 342 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), |
343 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), | 343 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), |
344 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), | 344 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), |
345 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), | 345 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), |
346 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), | 346 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), |
347 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), | 347 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), |
348 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), | 348 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), |
349 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), | 349 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), |
350 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), | 350 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), |
351 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), | 351 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), |
352 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), | 352 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), |
353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), | 353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), |
354 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), | 354 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), |
355 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), | 355 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), |
356 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), | 356 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), |
357 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), | 357 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), |
358 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), | 358 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), |
359 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), | 359 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), |
360 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), | 360 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), |
361 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), | 361 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), |
362 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), | 362 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), |
363 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), | 363 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), |
364 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), | 364 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), |
365 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), | 365 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), |
366 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), | 366 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), |
367 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), | 367 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), |
368 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), | 368 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), |
369 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), | 369 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), |
370 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), | 370 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), |
371 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), | 371 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), |
372 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), | 372 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), |
373 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), | 373 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), |
374 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), | 374 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), |
375 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), | 375 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), |
376 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), | 376 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), |
377 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), | 377 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), |
378 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), | 378 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), |
379 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), | 379 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), |
380 | 380 | ||
381 | /* HDMI op code */ | 381 | /* HDMI op code */ |
382 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), | 382 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), |
383 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), | 383 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), |
384 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), | 384 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), |
385 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), | 385 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), |
386 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), | 386 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), |
387 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), | 387 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), |
388 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), | 388 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), |
389 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), | 389 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), |
390 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), | 390 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), |
391 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), | 391 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), |
392 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), | 392 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), |
393 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), | 393 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), |
394 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), | 394 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), |
395 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), | 395 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), |
396 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), | 396 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), |
397 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), | 397 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), |
398 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), | 398 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), |
399 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), | 399 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), |
400 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), | 400 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), |
401 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | 401 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), |
402 | }; | 402 | }; |
403 | 403 | ||
404 | #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) | 404 | #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) |
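
SDVO_CMD_NAME_ENTRY pairs each opcode with its stringified name so the debug path can print commands symbolically. A sketch of the macro and the lookup it enables; the macro shape and the ARRAY_SIZE() scan follow common kernel practice and are assumptions, not quotes from this file:

    #define SDVO_CMD_NAME_ENTRY(cmd) { cmd, #cmd }

    /* Linear scan is fine here: the table is small and only walked on
     * the debug path. */
    static const char *sdvo_cmd_name(u8 cmd)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++)
                    if (sdvo_cmd_names[i].cmd == cmd)
                            return sdvo_cmd_names[i].name;
            return NULL;
    }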
@@ -1232,8 +1232,7 @@ static bool | |||
1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
1233 | { | 1233 | { |
1234 | /* Is there more than one type of output? */ | 1234 | /* Is there more than one type of output? */ |
1235 | int caps = intel_sdvo->caps.output_flags & 0xf; | 1235 | return hweight16(intel_sdvo->caps.output_flags) > 1; |
1236 | return caps & -caps; | ||
1237 | } | 1236 | } |
1238 | 1237 | ||
1239 | static struct edid * | 1238 | static struct edid * |
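
The rewrite above also fixes a logic bug: the old (caps & -caps) expression isolates the lowest set bit, so it is non-zero whenever any of the low four output flags is set, not only when more than one is. hweight16() is the kernel's 16-bit population count, which makes the intended test direct:

    /* "More than one output type" is simply popcount > 1, now computed
     * over the full 16-bit flag word rather than the low nibble. */
    static bool has_multiple_outputs(u16 output_flags)
    {
            return hweight16(output_flags) > 1;
    }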
@@ -1254,7 +1253,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) | |||
1254 | } | 1253 | } |
1255 | 1254 | ||
1256 | enum drm_connector_status | 1255 | enum drm_connector_status |
1257 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1256 | intel_sdvo_tmds_sink_detect(struct drm_connector *connector) |
1258 | { | 1257 | { |
1259 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | 1258 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1260 | enum drm_connector_status status; | 1259 | enum drm_connector_status status; |
@@ -1349,7 +1348,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1349 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1348 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1350 | ret = connector_status_disconnected; | 1349 | ret = connector_status_disconnected; |
1351 | else if (IS_TMDS(intel_sdvo_connector)) | 1350 | else if (IS_TMDS(intel_sdvo_connector)) |
1352 | ret = intel_sdvo_hdmi_sink_detect(connector); | 1351 | ret = intel_sdvo_tmds_sink_detect(connector); |
1353 | else { | 1352 | else { |
1354 | struct edid *edid; | 1353 | struct edid *edid; |
1355 | 1354 | ||
@@ -1896,7 +1895,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1896 | struct intel_sdvo *sdvo, u32 reg) | 1895 | struct intel_sdvo *sdvo, u32 reg) |
1897 | { | 1896 | { |
1898 | struct sdvo_device_mapping *mapping; | 1897 | struct sdvo_device_mapping *mapping; |
1899 | u8 pin, speed; | 1898 | u8 pin; |
1900 | 1899 | ||
1901 | if (IS_SDVOB(reg)) | 1900 | if (IS_SDVOB(reg)) |
1902 | mapping = &dev_priv->sdvo_mappings[0]; | 1901 | mapping = &dev_priv->sdvo_mappings[0]; |
@@ -1904,18 +1903,16 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1904 | mapping = &dev_priv->sdvo_mappings[1]; | 1903 | mapping = &dev_priv->sdvo_mappings[1]; |
1905 | 1904 | ||
1906 | pin = GMBUS_PORT_DPB; | 1905 | pin = GMBUS_PORT_DPB; |
1907 | speed = GMBUS_RATE_1MHZ >> 8; | 1906 | if (mapping->initialized) |
1908 | if (mapping->initialized) { | ||
1909 | pin = mapping->i2c_pin; | 1907 | pin = mapping->i2c_pin; |
1910 | speed = mapping->i2c_speed; | ||
1911 | } | ||
1912 | 1908 | ||
1913 | if (pin < GMBUS_NUM_PORTS) { | 1909 | if (pin < GMBUS_NUM_PORTS) { |
1914 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; | 1910 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; |
1915 | intel_gmbus_set_speed(sdvo->i2c, speed); | 1911 | intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); |
1916 | intel_gmbus_force_bit(sdvo->i2c, true); | 1912 | intel_gmbus_force_bit(sdvo->i2c, true); |
1917 | } else | 1913 | } else { |
1918 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; | 1914 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; |
1915 | } | ||
1919 | } | 1916 | } |
1920 | 1917 | ||
1921 | static bool | 1918 | static bool |
@@ -2206,7 +2203,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) | |||
2206 | bytes[0], bytes[1]); | 2203 | bytes[0], bytes[1]); |
2207 | return false; | 2204 | return false; |
2208 | } | 2205 | } |
2209 | intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1); | 2206 | intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2210 | 2207 | ||
2211 | return true; | 2208 | return true; |
2212 | } | 2209 | } |
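
crtc_mask holds one bit per display pipe the encoder may be driven from; adding bit 2 makes SDVO available on the third pipe introduced with Ivybridge. A one-line illustrative helper (not from the driver):

    static bool encoder_can_use_pipe(u32 crtc_mask, int pipe)
    {
            return (crtc_mask & (1u << pipe)) != 0;   /* pipe A=0, B=1, C=2 */
    }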
@@ -2275,7 +2272,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, | |||
2275 | DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ | 2272 | DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ |
2276 | data_value[0], data_value[1], response); \ | 2273 | data_value[0], data_value[1], response); \ |
2277 | } \ | 2274 | } \ |
2278 | } while(0) | 2275 | } while (0) |
2279 | 2276 | ||
2280 | static bool | 2277 | static bool |
2281 | intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, | 2278 | intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, |
@@ -2442,7 +2439,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2442 | 2439 | ||
2443 | if (IS_TV(intel_sdvo_connector)) | 2440 | if (IS_TV(intel_sdvo_connector)) |
2444 | return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); | 2441 | return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); |
2445 | else if(IS_LVDS(intel_sdvo_connector)) | 2442 | else if (IS_LVDS(intel_sdvo_connector)) |
2446 | return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); | 2443 | return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); |
2447 | else | 2444 | else |
2448 | return true; | 2445 | return true; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 4f4e23bc2d16..4aa6f343e49a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
@@ -46,63 +46,63 @@ | |||
46 | #define SDVO_OUTPUT_LAST (14) | 46 | #define SDVO_OUTPUT_LAST (14) |
47 | 47 | ||
48 | struct intel_sdvo_caps { | 48 | struct intel_sdvo_caps { |
49 | u8 vendor_id; | 49 | u8 vendor_id; |
50 | u8 device_id; | 50 | u8 device_id; |
51 | u8 device_rev_id; | 51 | u8 device_rev_id; |
52 | u8 sdvo_version_major; | 52 | u8 sdvo_version_major; |
53 | u8 sdvo_version_minor; | 53 | u8 sdvo_version_minor; |
54 | unsigned int sdvo_inputs_mask:2; | 54 | unsigned int sdvo_inputs_mask:2; |
55 | unsigned int smooth_scaling:1; | 55 | unsigned int smooth_scaling:1; |
56 | unsigned int sharp_scaling:1; | 56 | unsigned int sharp_scaling:1; |
57 | unsigned int up_scaling:1; | 57 | unsigned int up_scaling:1; |
58 | unsigned int down_scaling:1; | 58 | unsigned int down_scaling:1; |
59 | unsigned int stall_support:1; | 59 | unsigned int stall_support:1; |
60 | unsigned int pad:1; | 60 | unsigned int pad:1; |
61 | u16 output_flags; | 61 | u16 output_flags; |
62 | } __attribute__((packed)); | 62 | } __attribute__((packed)); |
63 | 63 | ||
64 | /** This matches the EDID DTD structure, more or less */ | 64 | /** This matches the EDID DTD structure, more or less */ |
65 | struct intel_sdvo_dtd { | 65 | struct intel_sdvo_dtd { |
66 | struct { | 66 | struct { |
67 | u16 clock; /**< pixel clock, in 10kHz units */ | 67 | u16 clock; /**< pixel clock, in 10kHz units */ |
68 | u8 h_active; /**< lower 8 bits (pixels) */ | 68 | u8 h_active; /**< lower 8 bits (pixels) */ |
69 | u8 h_blank; /**< lower 8 bits (pixels) */ | 69 | u8 h_blank; /**< lower 8 bits (pixels) */ |
70 | u8 h_high; /**< upper 4 bits each h_active, h_blank */ | 70 | u8 h_high; /**< upper 4 bits each h_active, h_blank */ |
71 | u8 v_active; /**< lower 8 bits (lines) */ | 71 | u8 v_active; /**< lower 8 bits (lines) */ |
72 | u8 v_blank; /**< lower 8 bits (lines) */ | 72 | u8 v_blank; /**< lower 8 bits (lines) */ |
73 | u8 v_high; /**< upper 4 bits each v_active, v_blank */ | 73 | u8 v_high; /**< upper 4 bits each v_active, v_blank */ |
74 | } part1; | 74 | } part1; |
75 | 75 | ||
76 | struct { | 76 | struct { |
77 | u8 h_sync_off; /**< lower 8 bits, from hblank start */ | 77 | u8 h_sync_off; /**< lower 8 bits, from hblank start */ |
78 | u8 h_sync_width; /**< lower 8 bits (pixels) */ | 78 | u8 h_sync_width; /**< lower 8 bits (pixels) */ |
79 | /** lower 4 bits each vsync offset, vsync width */ | 79 | /** lower 4 bits each vsync offset, vsync width */ |
80 | u8 v_sync_off_width; | 80 | u8 v_sync_off_width; |
81 | /** | 81 | /** |
82 | * 2 high bits of hsync offset, 2 high bits of hsync width, | 82 | * 2 high bits of hsync offset, 2 high bits of hsync width, |
83 | * bits 4-5 of vsync offset, and 2 high bits of vsync width. | 83 | * bits 4-5 of vsync offset, and 2 high bits of vsync width. |
84 | */ | 84 | */ |
85 | u8 sync_off_width_high; | 85 | u8 sync_off_width_high; |
86 | u8 dtd_flags; | 86 | u8 dtd_flags; |
87 | u8 sdvo_flags; | 87 | u8 sdvo_flags; |
88 | /** bits 6-7 of vsync offset at bits 6-7 */ | 88 | /** bits 6-7 of vsync offset at bits 6-7 */ |
89 | u8 v_sync_off_high; | 89 | u8 v_sync_off_high; |
90 | u8 reserved; | 90 | u8 reserved; |
91 | } part2; | 91 | } part2; |
92 | } __attribute__((packed)); | 92 | } __attribute__((packed)); |
93 | 93 | ||
94 | struct intel_sdvo_pixel_clock_range { | 94 | struct intel_sdvo_pixel_clock_range { |
95 | u16 min; /**< pixel clock, in 10kHz units */ | 95 | u16 min; /**< pixel clock, in 10kHz units */ |
96 | u16 max; /**< pixel clock, in 10kHz units */ | 96 | u16 max; /**< pixel clock, in 10kHz units */ |
97 | } __attribute__((packed)); | 97 | } __attribute__((packed)); |
98 | 98 | ||
99 | struct intel_sdvo_preferred_input_timing_args { | 99 | struct intel_sdvo_preferred_input_timing_args { |
100 | u16 clock; | 100 | u16 clock; |
101 | u16 width; | 101 | u16 width; |
102 | u16 height; | 102 | u16 height; |
103 | u8 interlace:1; | 103 | u8 interlace:1; |
104 | u8 scaled:1; | 104 | u8 scaled:1; |
105 | u8 pad:6; | 105 | u8 pad:6; |
106 | } __attribute__((packed)); | 106 | } __attribute__((packed)); |
107 | 107 | ||
108 | /* I2C registers for SDVO */ | 108 | /* I2C registers for SDVO */ |
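
As in the EDID detailed timing descriptor it mirrors, the DTD stores 12-bit timing values split across a low byte and a packed high nibble. A sketch of reassembling the horizontal pair; the nibble assignment (active in the high nibble, blank in the low) follows the usual EDID convention and is an assumption here:

    static void sdvo_dtd_get_h_timings(const struct intel_sdvo_dtd *dtd,
                                       u16 *h_active, u16 *h_blank)
    {
            /* h_high packs the upper 4 bits of h_active and h_blank */
            *h_active = dtd->part1.h_active | ((u16)(dtd->part1.h_high & 0xf0) << 4);
            *h_blank  = dtd->part1.h_blank  | ((u16)(dtd->part1.h_high & 0x0f) << 8);
    }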
@@ -154,9 +154,9 @@ struct intel_sdvo_preferred_input_timing_args { | |||
154 | */ | 154 | */ |
155 | #define SDVO_CMD_GET_TRAINED_INPUTS 0x03 | 155 | #define SDVO_CMD_GET_TRAINED_INPUTS 0x03 |
156 | struct intel_sdvo_get_trained_inputs_response { | 156 | struct intel_sdvo_get_trained_inputs_response { |
157 | unsigned int input0_trained:1; | 157 | unsigned int input0_trained:1; |
158 | unsigned int input1_trained:1; | 158 | unsigned int input1_trained:1; |
159 | unsigned int pad:6; | 159 | unsigned int pad:6; |
160 | } __attribute__((packed)); | 160 | } __attribute__((packed)); |
161 | 161 | ||
162 | /** Returns a struct intel_sdvo_output_flags of active outputs. */ | 162 | /** Returns a struct intel_sdvo_output_flags of active outputs. */ |
@@ -177,7 +177,7 @@ struct intel_sdvo_get_trained_inputs_response { | |||
177 | */ | 177 | */ |
178 | #define SDVO_CMD_GET_IN_OUT_MAP 0x06 | 178 | #define SDVO_CMD_GET_IN_OUT_MAP 0x06 |
179 | struct intel_sdvo_in_out_map { | 179 | struct intel_sdvo_in_out_map { |
180 | u16 in0, in1; | 180 | u16 in0, in1; |
181 | }; | 181 | }; |
182 | 182 | ||
183 | /** | 183 | /** |
@@ -210,10 +210,10 @@ struct intel_sdvo_in_out_map { | |||
210 | 210 | ||
211 | #define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f | 211 | #define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f |
212 | struct intel_sdvo_get_interrupt_event_source_response { | 212 | struct intel_sdvo_get_interrupt_event_source_response { |
213 | u16 interrupt_status; | 213 | u16 interrupt_status; |
214 | unsigned int ambient_light_interrupt:1; | 214 | unsigned int ambient_light_interrupt:1; |
215 | unsigned int hdmi_audio_encrypt_change:1; | 215 | unsigned int hdmi_audio_encrypt_change:1; |
216 | unsigned int pad:6; | 216 | unsigned int pad:6; |
217 | } __attribute__((packed)); | 217 | } __attribute__((packed)); |
218 | 218 | ||
219 | /** | 219 | /** |
@@ -225,8 +225,8 @@ struct intel_sdvo_get_interrupt_event_source_response { | |||
225 | */ | 225 | */ |
226 | #define SDVO_CMD_SET_TARGET_INPUT 0x10 | 226 | #define SDVO_CMD_SET_TARGET_INPUT 0x10 |
227 | struct intel_sdvo_set_target_input_args { | 227 | struct intel_sdvo_set_target_input_args { |
228 | unsigned int target_1:1; | 228 | unsigned int target_1:1; |
229 | unsigned int pad:7; | 229 | unsigned int pad:7; |
230 | } __attribute__((packed)); | 230 | } __attribute__((packed)); |
231 | 231 | ||
232 | /** | 232 | /** |
@@ -314,57 +314,57 @@ struct intel_sdvo_set_target_input_args { | |||
314 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 | 314 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 |
315 | /** 6 bytes of bit flags for TV formats shared by all TV format functions */ | 315 | /** 6 bytes of bit flags for TV formats shared by all TV format functions */ |
316 | struct intel_sdvo_tv_format { | 316 | struct intel_sdvo_tv_format { |
317 | unsigned int ntsc_m:1; | 317 | unsigned int ntsc_m:1; |
318 | unsigned int ntsc_j:1; | 318 | unsigned int ntsc_j:1; |
319 | unsigned int ntsc_443:1; | 319 | unsigned int ntsc_443:1; |
320 | unsigned int pal_b:1; | 320 | unsigned int pal_b:1; |
321 | unsigned int pal_d:1; | 321 | unsigned int pal_d:1; |
322 | unsigned int pal_g:1; | 322 | unsigned int pal_g:1; |
323 | unsigned int pal_h:1; | 323 | unsigned int pal_h:1; |
324 | unsigned int pal_i:1; | 324 | unsigned int pal_i:1; |
325 | 325 | ||
326 | unsigned int pal_m:1; | 326 | unsigned int pal_m:1; |
327 | unsigned int pal_n:1; | 327 | unsigned int pal_n:1; |
328 | unsigned int pal_nc:1; | 328 | unsigned int pal_nc:1; |
329 | unsigned int pal_60:1; | 329 | unsigned int pal_60:1; |
330 | unsigned int secam_b:1; | 330 | unsigned int secam_b:1; |
331 | unsigned int secam_d:1; | 331 | unsigned int secam_d:1; |
332 | unsigned int secam_g:1; | 332 | unsigned int secam_g:1; |
333 | unsigned int secam_k:1; | 333 | unsigned int secam_k:1; |
334 | 334 | ||
335 | unsigned int secam_k1:1; | 335 | unsigned int secam_k1:1; |
336 | unsigned int secam_l:1; | 336 | unsigned int secam_l:1; |
337 | unsigned int secam_60:1; | 337 | unsigned int secam_60:1; |
338 | unsigned int hdtv_std_smpte_240m_1080i_59:1; | 338 | unsigned int hdtv_std_smpte_240m_1080i_59:1; |
339 | unsigned int hdtv_std_smpte_240m_1080i_60:1; | 339 | unsigned int hdtv_std_smpte_240m_1080i_60:1; |
340 | unsigned int hdtv_std_smpte_260m_1080i_59:1; | 340 | unsigned int hdtv_std_smpte_260m_1080i_59:1; |
341 | unsigned int hdtv_std_smpte_260m_1080i_60:1; | 341 | unsigned int hdtv_std_smpte_260m_1080i_60:1; |
342 | unsigned int hdtv_std_smpte_274m_1080i_50:1; | 342 | unsigned int hdtv_std_smpte_274m_1080i_50:1; |
343 | 343 | ||
344 | unsigned int hdtv_std_smpte_274m_1080i_59:1; | 344 | unsigned int hdtv_std_smpte_274m_1080i_59:1; |
345 | unsigned int hdtv_std_smpte_274m_1080i_60:1; | 345 | unsigned int hdtv_std_smpte_274m_1080i_60:1; |
346 | unsigned int hdtv_std_smpte_274m_1080p_23:1; | 346 | unsigned int hdtv_std_smpte_274m_1080p_23:1; |
347 | unsigned int hdtv_std_smpte_274m_1080p_24:1; | 347 | unsigned int hdtv_std_smpte_274m_1080p_24:1; |
348 | unsigned int hdtv_std_smpte_274m_1080p_25:1; | 348 | unsigned int hdtv_std_smpte_274m_1080p_25:1; |
349 | unsigned int hdtv_std_smpte_274m_1080p_29:1; | 349 | unsigned int hdtv_std_smpte_274m_1080p_29:1; |
350 | unsigned int hdtv_std_smpte_274m_1080p_30:1; | 350 | unsigned int hdtv_std_smpte_274m_1080p_30:1; |
351 | unsigned int hdtv_std_smpte_274m_1080p_50:1; | 351 | unsigned int hdtv_std_smpte_274m_1080p_50:1; |
352 | 352 | ||
353 | unsigned int hdtv_std_smpte_274m_1080p_59:1; | 353 | unsigned int hdtv_std_smpte_274m_1080p_59:1; |
354 | unsigned int hdtv_std_smpte_274m_1080p_60:1; | 354 | unsigned int hdtv_std_smpte_274m_1080p_60:1; |
355 | unsigned int hdtv_std_smpte_295m_1080i_50:1; | 355 | unsigned int hdtv_std_smpte_295m_1080i_50:1; |
356 | unsigned int hdtv_std_smpte_295m_1080p_50:1; | 356 | unsigned int hdtv_std_smpte_295m_1080p_50:1; |
357 | unsigned int hdtv_std_smpte_296m_720p_59:1; | 357 | unsigned int hdtv_std_smpte_296m_720p_59:1; |
358 | unsigned int hdtv_std_smpte_296m_720p_60:1; | 358 | unsigned int hdtv_std_smpte_296m_720p_60:1; |
359 | unsigned int hdtv_std_smpte_296m_720p_50:1; | 359 | unsigned int hdtv_std_smpte_296m_720p_50:1; |
360 | unsigned int hdtv_std_smpte_293m_480p_59:1; | 360 | unsigned int hdtv_std_smpte_293m_480p_59:1; |
361 | 361 | ||
362 | unsigned int hdtv_std_smpte_170m_480i_59:1; | 362 | unsigned int hdtv_std_smpte_170m_480i_59:1; |
363 | unsigned int hdtv_std_iturbt601_576i_50:1; | 363 | unsigned int hdtv_std_iturbt601_576i_50:1; |
364 | unsigned int hdtv_std_iturbt601_576p_50:1; | 364 | unsigned int hdtv_std_iturbt601_576p_50:1; |
365 | unsigned int hdtv_std_eia_7702a_480i_60:1; | 365 | unsigned int hdtv_std_eia_7702a_480i_60:1; |
366 | unsigned int hdtv_std_eia_7702a_480p_60:1; | 366 | unsigned int hdtv_std_eia_7702a_480p_60:1; |
367 | unsigned int pad:3; | 367 | unsigned int pad:3; |
368 | } __attribute__((packed)); | 368 | } __attribute__((packed)); |
369 | 369 | ||
370 | #define SDVO_CMD_GET_TV_FORMAT 0x28 | 370 | #define SDVO_CMD_GET_TV_FORMAT 0x28 |
@@ -374,53 +374,53 @@ struct intel_sdvo_tv_format { | |||
374 | /** Returns the resolutions that can be used with the given TV format */ | 374 | /** Returns the resolutions that can be used with the given TV format */ |
375 | #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 | 375 | #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 |
376 | struct intel_sdvo_sdtv_resolution_request { | 376 | struct intel_sdvo_sdtv_resolution_request { |
377 | unsigned int ntsc_m:1; | 377 | unsigned int ntsc_m:1; |
378 | unsigned int ntsc_j:1; | 378 | unsigned int ntsc_j:1; |
379 | unsigned int ntsc_443:1; | 379 | unsigned int ntsc_443:1; |
380 | unsigned int pal_b:1; | 380 | unsigned int pal_b:1; |
381 | unsigned int pal_d:1; | 381 | unsigned int pal_d:1; |
382 | unsigned int pal_g:1; | 382 | unsigned int pal_g:1; |
383 | unsigned int pal_h:1; | 383 | unsigned int pal_h:1; |
384 | unsigned int pal_i:1; | 384 | unsigned int pal_i:1; |
385 | 385 | ||
386 | unsigned int pal_m:1; | 386 | unsigned int pal_m:1; |
387 | unsigned int pal_n:1; | 387 | unsigned int pal_n:1; |
388 | unsigned int pal_nc:1; | 388 | unsigned int pal_nc:1; |
389 | unsigned int pal_60:1; | 389 | unsigned int pal_60:1; |
390 | unsigned int secam_b:1; | 390 | unsigned int secam_b:1; |
391 | unsigned int secam_d:1; | 391 | unsigned int secam_d:1; |
392 | unsigned int secam_g:1; | 392 | unsigned int secam_g:1; |
393 | unsigned int secam_k:1; | 393 | unsigned int secam_k:1; |
394 | 394 | ||
395 | unsigned int secam_k1:1; | 395 | unsigned int secam_k1:1; |
396 | unsigned int secam_l:1; | 396 | unsigned int secam_l:1; |
397 | unsigned int secam_60:1; | 397 | unsigned int secam_60:1; |
398 | unsigned int pad:5; | 398 | unsigned int pad:5; |
399 | } __attribute__((packed)); | 399 | } __attribute__((packed)); |
400 | 400 | ||
401 | struct intel_sdvo_sdtv_resolution_reply { | 401 | struct intel_sdvo_sdtv_resolution_reply { |
402 | unsigned int res_320x200:1; | 402 | unsigned int res_320x200:1; |
403 | unsigned int res_320x240:1; | 403 | unsigned int res_320x240:1; |
404 | unsigned int res_400x300:1; | 404 | unsigned int res_400x300:1; |
405 | unsigned int res_640x350:1; | 405 | unsigned int res_640x350:1; |
406 | unsigned int res_640x400:1; | 406 | unsigned int res_640x400:1; |
407 | unsigned int res_640x480:1; | 407 | unsigned int res_640x480:1; |
408 | unsigned int res_704x480:1; | 408 | unsigned int res_704x480:1; |
409 | unsigned int res_704x576:1; | 409 | unsigned int res_704x576:1; |
410 | 410 | ||
411 | unsigned int res_720x350:1; | 411 | unsigned int res_720x350:1; |
412 | unsigned int res_720x400:1; | 412 | unsigned int res_720x400:1; |
413 | unsigned int res_720x480:1; | 413 | unsigned int res_720x480:1; |
414 | unsigned int res_720x540:1; | 414 | unsigned int res_720x540:1; |
415 | unsigned int res_720x576:1; | 415 | unsigned int res_720x576:1; |
416 | unsigned int res_768x576:1; | 416 | unsigned int res_768x576:1; |
417 | unsigned int res_800x600:1; | 417 | unsigned int res_800x600:1; |
418 | unsigned int res_832x624:1; | 418 | unsigned int res_832x624:1; |
419 | 419 | ||
420 | unsigned int res_920x766:1; | 420 | unsigned int res_920x766:1; |
421 | unsigned int res_1024x768:1; | 421 | unsigned int res_1024x768:1; |
422 | unsigned int res_1280x1024:1; | 422 | unsigned int res_1280x1024:1; |
423 | unsigned int pad:5; | 423 | unsigned int pad:5; |
424 | } __attribute__((packed)); | 424 | } __attribute__((packed)); |
425 | 425 | ||
426 | /* Get supported resolution with square pixel aspect ratio that can be | 426 | /* Get supported resolution with square pixel aspect ratio that can be |
@@ -428,90 +428,90 @@ struct intel_sdvo_sdtv_resolution_reply { | |||
428 | #define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 | 428 | #define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 |
429 | 429 | ||
430 | struct intel_sdvo_hdtv_resolution_request { | 430 | struct intel_sdvo_hdtv_resolution_request { |
431 | unsigned int hdtv_std_smpte_240m_1080i_59:1; | 431 | unsigned int hdtv_std_smpte_240m_1080i_59:1; |
432 | unsigned int hdtv_std_smpte_240m_1080i_60:1; | 432 | unsigned int hdtv_std_smpte_240m_1080i_60:1; |
433 | unsigned int hdtv_std_smpte_260m_1080i_59:1; | 433 | unsigned int hdtv_std_smpte_260m_1080i_59:1; |
434 | unsigned int hdtv_std_smpte_260m_1080i_60:1; | 434 | unsigned int hdtv_std_smpte_260m_1080i_60:1; |
435 | unsigned int hdtv_std_smpte_274m_1080i_50:1; | 435 | unsigned int hdtv_std_smpte_274m_1080i_50:1; |
436 | unsigned int hdtv_std_smpte_274m_1080i_59:1; | 436 | unsigned int hdtv_std_smpte_274m_1080i_59:1; |
437 | unsigned int hdtv_std_smpte_274m_1080i_60:1; | 437 | unsigned int hdtv_std_smpte_274m_1080i_60:1; |
438 | unsigned int hdtv_std_smpte_274m_1080p_23:1; | 438 | unsigned int hdtv_std_smpte_274m_1080p_23:1; |
439 | 439 | ||
440 | unsigned int hdtv_std_smpte_274m_1080p_24:1; | 440 | unsigned int hdtv_std_smpte_274m_1080p_24:1; |
441 | unsigned int hdtv_std_smpte_274m_1080p_25:1; | 441 | unsigned int hdtv_std_smpte_274m_1080p_25:1; |
442 | unsigned int hdtv_std_smpte_274m_1080p_29:1; | 442 | unsigned int hdtv_std_smpte_274m_1080p_29:1; |
443 | unsigned int hdtv_std_smpte_274m_1080p_30:1; | 443 | unsigned int hdtv_std_smpte_274m_1080p_30:1; |
444 | unsigned int hdtv_std_smpte_274m_1080p_50:1; | 444 | unsigned int hdtv_std_smpte_274m_1080p_50:1; |
445 | unsigned int hdtv_std_smpte_274m_1080p_59:1; | 445 | unsigned int hdtv_std_smpte_274m_1080p_59:1; |
446 | unsigned int hdtv_std_smpte_274m_1080p_60:1; | 446 | unsigned int hdtv_std_smpte_274m_1080p_60:1; |
447 | unsigned int hdtv_std_smpte_295m_1080i_50:1; | 447 | unsigned int hdtv_std_smpte_295m_1080i_50:1; |
448 | 448 | ||
449 | unsigned int hdtv_std_smpte_295m_1080p_50:1; | 449 | unsigned int hdtv_std_smpte_295m_1080p_50:1; |
450 | unsigned int hdtv_std_smpte_296m_720p_59:1; | 450 | unsigned int hdtv_std_smpte_296m_720p_59:1; |
451 | unsigned int hdtv_std_smpte_296m_720p_60:1; | 451 | unsigned int hdtv_std_smpte_296m_720p_60:1; |
452 | unsigned int hdtv_std_smpte_296m_720p_50:1; | 452 | unsigned int hdtv_std_smpte_296m_720p_50:1; |
453 | unsigned int hdtv_std_smpte_293m_480p_59:1; | 453 | unsigned int hdtv_std_smpte_293m_480p_59:1; |
454 | unsigned int hdtv_std_smpte_170m_480i_59:1; | 454 | unsigned int hdtv_std_smpte_170m_480i_59:1; |
455 | unsigned int hdtv_std_iturbt601_576i_50:1; | 455 | unsigned int hdtv_std_iturbt601_576i_50:1; |
456 | unsigned int hdtv_std_iturbt601_576p_50:1; | 456 | unsigned int hdtv_std_iturbt601_576p_50:1; |
457 | 457 | ||
458 | unsigned int hdtv_std_eia_7702a_480i_60:1; | 458 | unsigned int hdtv_std_eia_7702a_480i_60:1; |
459 | unsigned int hdtv_std_eia_7702a_480p_60:1; | 459 | unsigned int hdtv_std_eia_7702a_480p_60:1; |
460 | unsigned int pad:6; | 460 | unsigned int pad:6; |
461 | } __attribute__((packed)); | 461 | } __attribute__((packed)); |
462 | 462 | ||
463 | struct intel_sdvo_hdtv_resolution_reply { | 463 | struct intel_sdvo_hdtv_resolution_reply { |
464 | unsigned int res_640x480:1; | 464 | unsigned int res_640x480:1; |
465 | unsigned int res_800x600:1; | 465 | unsigned int res_800x600:1; |
466 | unsigned int res_1024x768:1; | 466 | unsigned int res_1024x768:1; |
467 | unsigned int res_1280x960:1; | 467 | unsigned int res_1280x960:1; |
468 | unsigned int res_1400x1050:1; | 468 | unsigned int res_1400x1050:1; |
469 | unsigned int res_1600x1200:1; | 469 | unsigned int res_1600x1200:1; |
470 | unsigned int res_1920x1440:1; | 470 | unsigned int res_1920x1440:1; |
471 | unsigned int res_2048x1536:1; | 471 | unsigned int res_2048x1536:1; |
472 | 472 | ||
473 | unsigned int res_2560x1920:1; | 473 | unsigned int res_2560x1920:1; |
474 | unsigned int res_3200x2400:1; | 474 | unsigned int res_3200x2400:1; |
475 | unsigned int res_3840x2880:1; | 475 | unsigned int res_3840x2880:1; |
476 | unsigned int pad1:5; | 476 | unsigned int pad1:5; |
477 | 477 | ||
478 | unsigned int res_848x480:1; | 478 | unsigned int res_848x480:1; |
479 | unsigned int res_1064x600:1; | 479 | unsigned int res_1064x600:1; |
480 | unsigned int res_1280x720:1; | 480 | unsigned int res_1280x720:1; |
481 | unsigned int res_1360x768:1; | 481 | unsigned int res_1360x768:1; |
482 | unsigned int res_1704x960:1; | 482 | unsigned int res_1704x960:1; |
483 | unsigned int res_1864x1050:1; | 483 | unsigned int res_1864x1050:1; |
484 | unsigned int res_1920x1080:1; | 484 | unsigned int res_1920x1080:1; |
485 | unsigned int res_2128x1200:1; | 485 | unsigned int res_2128x1200:1; |
486 | 486 | ||
487 | unsigned int res_2560x1400:1; | 487 | unsigned int res_2560x1400:1; |
488 | unsigned int res_2728x1536:1; | 488 | unsigned int res_2728x1536:1; |
489 | unsigned int res_3408x1920:1; | 489 | unsigned int res_3408x1920:1; |
490 | unsigned int res_4264x2400:1; | 490 | unsigned int res_4264x2400:1; |
491 | unsigned int res_5120x2880:1; | 491 | unsigned int res_5120x2880:1; |
492 | unsigned int pad2:3; | 492 | unsigned int pad2:3; |
493 | 493 | ||
494 | unsigned int res_768x480:1; | 494 | unsigned int res_768x480:1; |
495 | unsigned int res_960x600:1; | 495 | unsigned int res_960x600:1; |
496 | unsigned int res_1152x720:1; | 496 | unsigned int res_1152x720:1; |
497 | unsigned int res_1124x768:1; | 497 | unsigned int res_1124x768:1; |
498 | unsigned int res_1536x960:1; | 498 | unsigned int res_1536x960:1; |
499 | unsigned int res_1680x1050:1; | 499 | unsigned int res_1680x1050:1; |
500 | unsigned int res_1728x1080:1; | 500 | unsigned int res_1728x1080:1; |
501 | unsigned int res_1920x1200:1; | 501 | unsigned int res_1920x1200:1; |
502 | 502 | ||
503 | unsigned int res_2304x1440:1; | 503 | unsigned int res_2304x1440:1; |
504 | unsigned int res_2456x1536:1; | 504 | unsigned int res_2456x1536:1; |
505 | unsigned int res_3072x1920:1; | 505 | unsigned int res_3072x1920:1; |
506 | unsigned int res_3840x2400:1; | 506 | unsigned int res_3840x2400:1; |
507 | unsigned int res_4608x2880:1; | 507 | unsigned int res_4608x2880:1; |
508 | unsigned int pad3:3; | 508 | unsigned int pad3:3; |
509 | 509 | ||
510 | unsigned int res_1280x1024:1; | 510 | unsigned int res_1280x1024:1; |
511 | unsigned int pad4:7; | 511 | unsigned int pad4:7; |
512 | 512 | ||
513 | unsigned int res_1280x768:1; | 513 | unsigned int res_1280x768:1; |
514 | unsigned int pad5:7; | 514 | unsigned int pad5:7; |
515 | } __attribute__((packed)); | 515 | } __attribute__((packed)); |
516 | 516 | ||
517 | /* Get supported power state returns info for encoder and monitor, rely on | 517 | /* Get supported power state returns info for encoder and monitor, rely on |
@@ -539,25 +539,25 @@ struct intel_sdvo_hdtv_resolution_reply { | |||
539 | * The high fields are bits 8:9 of the 10-bit values. | 539 | * The high fields are bits 8:9 of the 10-bit values. |
540 | */ | 540 | */ |
541 | struct sdvo_panel_power_sequencing { | 541 | struct sdvo_panel_power_sequencing { |
542 | u8 t0; | 542 | u8 t0; |
543 | u8 t1; | 543 | u8 t1; |
544 | u8 t2; | 544 | u8 t2; |
545 | u8 t3; | 545 | u8 t3; |
546 | u8 t4; | 546 | u8 t4; |
547 | 547 | ||
548 | unsigned int t0_high:2; | 548 | unsigned int t0_high:2; |
549 | unsigned int t1_high:2; | 549 | unsigned int t1_high:2; |
550 | unsigned int t2_high:2; | 550 | unsigned int t2_high:2; |
551 | unsigned int t3_high:2; | 551 | unsigned int t3_high:2; |
552 | 552 | ||
553 | unsigned int t4_high:2; | 553 | unsigned int t4_high:2; |
554 | unsigned int pad:6; | 554 | unsigned int pad:6; |
555 | } __attribute__((packed)); | 555 | } __attribute__((packed)); |
556 | 556 | ||
557 | #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 | 557 | #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 |
558 | struct sdvo_max_backlight_reply { | 558 | struct sdvo_max_backlight_reply { |
559 | u8 max_value; | 559 | u8 max_value; |
560 | u8 default_value; | 560 | u8 default_value; |
561 | } __attribute__((packed)); | 561 | } __attribute__((packed)); |
562 | 562 | ||
563 | #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 | 563 | #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 |
@@ -565,16 +565,16 @@ struct sdvo_max_backlight_reply { | |||
565 | 565 | ||
566 | #define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 | 566 | #define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 |
567 | struct sdvo_get_ambient_light_reply { | 567 | struct sdvo_get_ambient_light_reply { |
568 | u16 trip_low; | 568 | u16 trip_low; |
569 | u16 trip_high; | 569 | u16 trip_high; |
570 | u16 value; | 570 | u16 value; |
571 | } __attribute__((packed)); | 571 | } __attribute__((packed)); |
572 | #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 | 572 | #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 |
573 | struct sdvo_set_ambient_light_reply { | 573 | struct sdvo_set_ambient_light_reply { |
574 | u16 trip_low; | 574 | u16 trip_low; |
575 | u16 trip_high; | 575 | u16 trip_high; |
576 | unsigned int enable:1; | 576 | unsigned int enable:1; |
577 | unsigned int pad:7; | 577 | unsigned int pad:7; |
578 | } __attribute__((packed)); | 578 | } __attribute__((packed)); |
579 | 579 | ||
580 | /* Set display power state */ | 580 | /* Set display power state */ |
@@ -586,23 +586,23 @@ struct sdvo_set_ambient_light_reply { | |||
586 | 586 | ||
587 | #define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 | 587 | #define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 |
588 | struct intel_sdvo_enhancements_reply { | 588 | struct intel_sdvo_enhancements_reply { |
589 | unsigned int flicker_filter:1; | 589 | unsigned int flicker_filter:1; |
590 | unsigned int flicker_filter_adaptive:1; | 590 | unsigned int flicker_filter_adaptive:1; |
591 | unsigned int flicker_filter_2d:1; | 591 | unsigned int flicker_filter_2d:1; |
592 | unsigned int saturation:1; | 592 | unsigned int saturation:1; |
593 | unsigned int hue:1; | 593 | unsigned int hue:1; |
594 | unsigned int brightness:1; | 594 | unsigned int brightness:1; |
595 | unsigned int contrast:1; | 595 | unsigned int contrast:1; |
596 | unsigned int overscan_h:1; | 596 | unsigned int overscan_h:1; |
597 | 597 | ||
598 | unsigned int overscan_v:1; | 598 | unsigned int overscan_v:1; |
599 | unsigned int hpos:1; | 599 | unsigned int hpos:1; |
600 | unsigned int vpos:1; | 600 | unsigned int vpos:1; |
601 | unsigned int sharpness:1; | 601 | unsigned int sharpness:1; |
602 | unsigned int dot_crawl:1; | 602 | unsigned int dot_crawl:1; |
603 | unsigned int dither:1; | 603 | unsigned int dither:1; |
604 | unsigned int tv_chroma_filter:1; | 604 | unsigned int tv_chroma_filter:1; |
605 | unsigned int tv_luma_filter:1; | 605 | unsigned int tv_luma_filter:1; |
606 | } __attribute__((packed)); | 606 | } __attribute__((packed)); |
607 | 607 | ||
608 | /* Picture enhancement limits below are dependent on the current TV format, | 608 | /* Picture enhancement limits below are dependent on the current TV format, |
@@ -623,8 +623,8 @@ struct intel_sdvo_enhancements_reply { | |||
623 | #define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 | 623 | #define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 |
624 | #define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 | 624 | #define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 |
625 | struct intel_sdvo_enhancement_limits_reply { | 625 | struct intel_sdvo_enhancement_limits_reply { |
626 | u16 max_value; | 626 | u16 max_value; |
627 | u16 default_value; | 627 | u16 default_value; |
628 | } __attribute__((packed)); | 628 | } __attribute__((packed)); |
629 | 629 | ||
630 | #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f | 630 | #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f |
@@ -665,8 +665,8 @@ struct intel_sdvo_enhancement_limits_reply { | |||
665 | #define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 | 665 | #define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 |
666 | #define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 | 666 | #define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 |
667 | struct intel_sdvo_enhancements_arg { | 667 | struct intel_sdvo_enhancements_arg { |
668 | u16 value; | 668 | u16 value; |
669 | }__attribute__((packed)); | 669 | } __attribute__((packed)); |
670 | 670 | ||
671 | #define SDVO_CMD_GET_DOT_CRAWL 0x70 | 671 | #define SDVO_CMD_GET_DOT_CRAWL 0x70 |
672 | #define SDVO_CMD_SET_DOT_CRAWL 0x71 | 672 | #define SDVO_CMD_SET_DOT_CRAWL 0x71 |
@@ -717,7 +717,7 @@ struct intel_sdvo_enhancements_arg { | |||
717 | #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c | 717 | #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c |
718 | #define SDVO_NEED_TO_STALL (1 << 7) | 718 | #define SDVO_NEED_TO_STALL (1 << 7) |
719 | 719 | ||
720 | struct intel_sdvo_encode{ | 720 | struct intel_sdvo_encode { |
721 | u8 dvi_rev; | 721 | u8 dvi_rev; |
722 | u8 hdmi_rev; | 722 | u8 hdmi_rev; |
723 | } __attribute__ ((packed)); | 723 | } __attribute__ ((packed)); |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 210d570fd516..f3c6a9a8b081 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -194,10 +194,10 @@ static const u32 filter_table[] = { | |||
194 | * | 194 | * |
195 | * if (f >= 1) { | 195 | * if (f >= 1) { |
196 | * exp = 0x7; | 196 | * exp = 0x7; |
197 | * mant = 1 << 8; | 197 | * mant = 1 << 8; |
198 | * } else { | 198 | * } else { |
199 | * for (exp = 0; exp < 3 && f < 0.5; exp++) | 199 | * for (exp = 0; exp < 3 && f < 0.5; exp++) |
200 | * f *= 2.0; | 200 | * f *= 2.0; |
201 | * mant = (f * (1 << 9) + 0.5); | 201 | * mant = (f * (1 << 9) + 0.5); |
202 | * if (mant >= (1 << 9)) | 202 | * if (mant >= (1 << 9)) |
203 | * mant = (1 << 9) - 1; | 203 | * mant = (1 << 9) - 1; |
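
The comment above records how the filter coefficients were generated: each value is packed as a 3-bit exponent and 9-bit mantissa. A standalone, runnable rendering of that pseudocode (an illustration of the comment, not code from the driver; in the normalized branch the encoded value is approximately mant / (512 << exp)):

    #include <stdint.h>

    static void tv_filter_encode(double f, uint32_t *exp, uint32_t *mant)
    {
            if (f >= 1.0) {                 /* saturate */
                    *exp = 0x7;
                    *mant = 1 << 8;
                    return;
            }
            /* normalize into [0.5, 1) with at most three doublings */
            for (*exp = 0; *exp < 3 && f < 0.5; (*exp)++)
                    f *= 2.0;
            *mant = (uint32_t)(f * (1 << 9) + 0.5);
            if (*mant >= (1 << 9))
                    *mant = (1 << 9) - 1;
    }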
@@ -430,7 +430,7 @@ static const struct tv_mode tv_modes[] = { | |||
430 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | 430 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, |
431 | .vsync_len = 6, | 431 | .vsync_len = 6, |
432 | 432 | ||
433 | .veq_ena = true, .veq_start_f1 = 0, | 433 | .veq_ena = true, .veq_start_f1 = 0, |
434 | .veq_start_f2 = 1, .veq_len = 18, | 434 | .veq_start_f2 = 1, .veq_len = 18, |
435 | 435 | ||
436 | .vi_end_f1 = 20, .vi_end_f2 = 21, | 436 | .vi_end_f1 = 20, .vi_end_f2 = 21, |
@@ -472,7 +472,7 @@ static const struct tv_mode tv_modes[] = { | |||
472 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | 472 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, |
473 | .vsync_len = 6, | 473 | .vsync_len = 6, |
474 | 474 | ||
475 | .veq_ena = true, .veq_start_f1 = 0, | 475 | .veq_ena = true, .veq_start_f1 = 0, |
476 | .veq_start_f2 = 1, .veq_len = 18, | 476 | .veq_start_f2 = 1, .veq_len = 18, |
477 | 477 | ||
478 | .vi_end_f1 = 20, .vi_end_f2 = 21, | 478 | .vi_end_f1 = 20, .vi_end_f2 = 21, |
@@ -515,7 +515,7 @@ static const struct tv_mode tv_modes[] = { | |||
515 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | 515 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, |
516 | .vsync_len = 6, | 516 | .vsync_len = 6, |
517 | 517 | ||
518 | .veq_ena = true, .veq_start_f1 = 0, | 518 | .veq_ena = true, .veq_start_f1 = 0, |
519 | .veq_start_f2 = 1, .veq_len = 18, | 519 | .veq_start_f2 = 1, .veq_len = 18, |
520 | 520 | ||
521 | .vi_end_f1 = 20, .vi_end_f2 = 21, | 521 | .vi_end_f1 = 20, .vi_end_f2 = 21, |
@@ -558,7 +558,7 @@ static const struct tv_mode tv_modes[] = { | |||
558 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | 558 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, |
559 | .vsync_len = 6, | 559 | .vsync_len = 6, |
560 | 560 | ||
561 | .veq_ena = true, .veq_start_f1 = 0, | 561 | .veq_ena = true, .veq_start_f1 = 0, |
562 | .veq_start_f2 = 1, .veq_len = 18, | 562 | .veq_start_f2 = 1, .veq_len = 18, |
563 | 563 | ||
564 | .vi_end_f1 = 20, .vi_end_f2 = 21, | 564 | .vi_end_f1 = 20, .vi_end_f2 = 21, |
@@ -602,14 +602,14 @@ static const struct tv_mode tv_modes[] = { | |||
602 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | 602 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, |
603 | .vsync_len = 6, | 603 | .vsync_len = 6, |
604 | 604 | ||
605 | .veq_ena = true, .veq_start_f1 = 0, | 605 | .veq_ena = true, .veq_start_f1 = 0, |
606 | .veq_start_f2 = 1, .veq_len = 18, | 606 | .veq_start_f2 = 1, .veq_len = 18, |
607 | 607 | ||
608 | .vi_end_f1 = 24, .vi_end_f2 = 25, | 608 | .vi_end_f1 = 24, .vi_end_f2 = 25, |
609 | .nbr_end = 286, | 609 | .nbr_end = 286, |
610 | 610 | ||
611 | .burst_ena = true, | 611 | .burst_ena = true, |
612 | .hburst_start = 73, .hburst_len = 34, | 612 | .hburst_start = 73, .hburst_len = 34, |
613 | .vburst_start_f1 = 8, .vburst_end_f1 = 285, | 613 | .vburst_start_f1 = 8, .vburst_end_f1 = 285, |
614 | .vburst_start_f2 = 8, .vburst_end_f2 = 286, | 614 | .vburst_start_f2 = 8, .vburst_end_f2 = 286, |
615 | .vburst_start_f3 = 9, .vburst_end_f3 = 286, | 615 | .vburst_start_f3 = 9, .vburst_end_f3 = 286, |
@@ -646,7 +646,7 @@ static const struct tv_mode tv_modes[] = { | |||
646 | .vsync_start_f1 = 5, .vsync_start_f2 = 6, | 646 | .vsync_start_f1 = 5, .vsync_start_f2 = 6, |
647 | .vsync_len = 5, | 647 | .vsync_len = 5, |
648 | 648 | ||
649 | .veq_ena = true, .veq_start_f1 = 0, | 649 | .veq_ena = true, .veq_start_f1 = 0, |
650 | .veq_start_f2 = 1, .veq_len = 15, | 650 | .veq_start_f2 = 1, .veq_len = 15, |
651 | 651 | ||
652 | .vi_end_f1 = 24, .vi_end_f2 = 25, | 652 | .vi_end_f1 = 24, .vi_end_f2 = 25, |
@@ -675,7 +675,7 @@ static const struct tv_mode tv_modes[] = { | |||
675 | }, | 675 | }, |
676 | { | 676 | { |
677 | .name = "480p@59.94Hz", | 677 | .name = "480p@59.94Hz", |
678 | .clock = 107520, | 678 | .clock = 107520, |
679 | .refresh = 59940, | 679 | .refresh = 59940, |
680 | .oversample = TV_OVERSAMPLE_4X, | 680 | .oversample = TV_OVERSAMPLE_4X, |
681 | .component_only = 1, | 681 | .component_only = 1, |
@@ -683,7 +683,7 @@ static const struct tv_mode tv_modes[] = { | |||
683 | .hsync_end = 64, .hblank_end = 122, | 683 | .hsync_end = 64, .hblank_end = 122, |
684 | .hblank_start = 842, .htotal = 857, | 684 | .hblank_start = 842, .htotal = 857, |
685 | 685 | ||
686 | .progressive = true,.trilevel_sync = false, | 686 | .progressive = true, .trilevel_sync = false, |
687 | 687 | ||
688 | .vsync_start_f1 = 12, .vsync_start_f2 = 12, | 688 | .vsync_start_f1 = 12, .vsync_start_f2 = 12, |
689 | .vsync_len = 12, | 689 | .vsync_len = 12, |
@@ -699,7 +699,7 @@ static const struct tv_mode tv_modes[] = { | |||
699 | }, | 699 | }, |
700 | { | 700 | { |
701 | .name = "480p@60Hz", | 701 | .name = "480p@60Hz", |
702 | .clock = 107520, | 702 | .clock = 107520, |
703 | .refresh = 60000, | 703 | .refresh = 60000, |
704 | .oversample = TV_OVERSAMPLE_4X, | 704 | .oversample = TV_OVERSAMPLE_4X, |
705 | .component_only = 1, | 705 | .component_only = 1, |
@@ -707,7 +707,7 @@ static const struct tv_mode tv_modes[] = { | |||
707 | .hsync_end = 64, .hblank_end = 122, | 707 | .hsync_end = 64, .hblank_end = 122, |
708 | .hblank_start = 842, .htotal = 856, | 708 | .hblank_start = 842, .htotal = 856, |
709 | 709 | ||
710 | .progressive = true,.trilevel_sync = false, | 710 | .progressive = true, .trilevel_sync = false, |
711 | 711 | ||
712 | .vsync_start_f1 = 12, .vsync_start_f2 = 12, | 712 | .vsync_start_f1 = 12, .vsync_start_f2 = 12, |
713 | .vsync_len = 12, | 713 | .vsync_len = 12, |
@@ -723,7 +723,7 @@ static const struct tv_mode tv_modes[] = { | |||
723 | }, | 723 | }, |
724 | { | 724 | { |
725 | .name = "576p", | 725 | .name = "576p", |
726 | .clock = 107520, | 726 | .clock = 107520, |
727 | .refresh = 50000, | 727 | .refresh = 50000, |
728 | .oversample = TV_OVERSAMPLE_4X, | 728 | .oversample = TV_OVERSAMPLE_4X, |
729 | .component_only = 1, | 729 | .component_only = 1, |
@@ -755,7 +755,7 @@ static const struct tv_mode tv_modes[] = { | |||
755 | .hsync_end = 80, .hblank_end = 300, | 755 | .hsync_end = 80, .hblank_end = 300, |
756 | .hblank_start = 1580, .htotal = 1649, | 756 | .hblank_start = 1580, .htotal = 1649, |
757 | 757 | ||
758 | .progressive = true, .trilevel_sync = true, | 758 | .progressive = true, .trilevel_sync = true, |
759 | 759 | ||
760 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | 760 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, |
761 | .vsync_len = 10, | 761 | .vsync_len = 10, |
@@ -779,7 +779,7 @@ static const struct tv_mode tv_modes[] = { | |||
779 | .hsync_end = 80, .hblank_end = 300, | 779 | .hsync_end = 80, .hblank_end = 300, |
780 | .hblank_start = 1580, .htotal = 1651, | 780 | .hblank_start = 1580, .htotal = 1651, |
781 | 781 | ||
782 | .progressive = true, .trilevel_sync = true, | 782 | .progressive = true, .trilevel_sync = true, |
783 | 783 | ||
784 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | 784 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, |
785 | .vsync_len = 10, | 785 | .vsync_len = 10, |
@@ -803,7 +803,7 @@ static const struct tv_mode tv_modes[] = { | |||
803 | .hsync_end = 80, .hblank_end = 300, | 803 | .hsync_end = 80, .hblank_end = 300, |
804 | .hblank_start = 1580, .htotal = 1979, | 804 | .hblank_start = 1580, .htotal = 1979, |
805 | 805 | ||
806 | .progressive = true, .trilevel_sync = true, | 806 | .progressive = true, .trilevel_sync = true, |
807 | 807 | ||
808 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | 808 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, |
809 | .vsync_len = 10, | 809 | .vsync_len = 10, |
@@ -828,12 +828,12 @@ static const struct tv_mode tv_modes[] = { | |||
828 | .hsync_end = 88, .hblank_end = 235, | 828 | .hsync_end = 88, .hblank_end = 235, |
829 | .hblank_start = 2155, .htotal = 2639, | 829 | .hblank_start = 2155, .htotal = 2639, |
830 | 830 | ||
831 | .progressive = false, .trilevel_sync = true, | 831 | .progressive = false, .trilevel_sync = true, |
832 | 832 | ||
833 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, | 833 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, |
834 | .vsync_len = 10, | 834 | .vsync_len = 10, |
835 | 835 | ||
836 | .veq_ena = true, .veq_start_f1 = 4, | 836 | .veq_ena = true, .veq_start_f1 = 4, |
837 | .veq_start_f2 = 4, .veq_len = 10, | 837 | .veq_start_f2 = 4, .veq_len = 10, |
838 | 838 | ||
839 | 839 | ||
@@ -854,12 +854,12 @@ static const struct tv_mode tv_modes[] = { | |||
854 | .hsync_end = 88, .hblank_end = 235, | 854 | .hsync_end = 88, .hblank_end = 235, |
855 | .hblank_start = 2155, .htotal = 2199, | 855 | .hblank_start = 2155, .htotal = 2199, |
856 | 856 | ||
857 | .progressive = false, .trilevel_sync = true, | 857 | .progressive = false, .trilevel_sync = true, |
858 | 858 | ||
859 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, | 859 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, |
860 | .vsync_len = 10, | 860 | .vsync_len = 10, |
861 | 861 | ||
862 | .veq_ena = true, .veq_start_f1 = 4, | 862 | .veq_ena = true, .veq_start_f1 = 4, |
863 | .veq_start_f2 = 4, .veq_len = 10, | 863 | .veq_start_f2 = 4, .veq_len = 10, |
864 | 864 | ||
865 | 865 | ||
@@ -880,16 +880,16 @@ static const struct tv_mode tv_modes[] = { | |||
880 | .hsync_end = 88, .hblank_end = 235, | 880 | .hsync_end = 88, .hblank_end = 235, |
881 | .hblank_start = 2155, .htotal = 2201, | 881 | .hblank_start = 2155, .htotal = 2201, |
882 | 882 | ||
883 | .progressive = false, .trilevel_sync = true, | 883 | .progressive = false, .trilevel_sync = true, |
884 | 884 | ||
885 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, | 885 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, |
886 | .vsync_len = 10, | 886 | .vsync_len = 10, |
887 | 887 | ||
888 | .veq_ena = true, .veq_start_f1 = 4, | 888 | .veq_ena = true, .veq_start_f1 = 4, |
889 | .veq_start_f2 = 4, .veq_len = 10, | 889 | .veq_start_f2 = 4, .veq_len = 10, |
890 | 890 | ||
891 | 891 | ||
892 | .vi_end_f1 = 21, .vi_end_f2 = 22, | 892 | .vi_end_f1 = 21, .vi_end_f2 = 22, |
893 | .nbr_end = 539, | 893 | .nbr_end = 539, |
894 | 894 | ||
895 | .burst_ena = false, | 895 | .burst_ena = false, |
@@ -916,7 +916,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) | |||
916 | struct drm_device *dev = encoder->dev; | 916 | struct drm_device *dev = encoder->dev; |
917 | struct drm_i915_private *dev_priv = dev->dev_private; | 917 | struct drm_i915_private *dev_priv = dev->dev_private; |
918 | 918 | ||
919 | switch(mode) { | 919 | switch (mode) { |
920 | case DRM_MODE_DPMS_ON: | 920 | case DRM_MODE_DPMS_ON: |
921 | I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); | 921 | I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); |
922 | break; | 922 | break; |
@@ -933,7 +933,7 @@ intel_tv_mode_lookup(const char *tv_format) | |||
933 | { | 933 | { |
934 | int i; | 934 | int i; |
935 | 935 | ||
936 | for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) { | 936 | for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) { |
937 | const struct tv_mode *tv_mode = &tv_modes[i]; | 937 | const struct tv_mode *tv_mode = &tv_modes[i]; |
938 | 938 | ||
939 | if (!strcmp(tv_format, tv_mode->name)) | 939 | if (!strcmp(tv_format, tv_mode->name)) |
@@ -1128,7 +1128,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1128 | if (color_conversion) { | 1128 | if (color_conversion) { |
1129 | I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | | 1129 | I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | |
1130 | color_conversion->gy); | 1130 | color_conversion->gy); |
1131 | I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) | | 1131 | I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) | |
1132 | color_conversion->ay); | 1132 | color_conversion->ay); |
1133 | I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | | 1133 | I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | |
1134 | color_conversion->gu); | 1134 | color_conversion->gu); |
@@ -1232,7 +1232,7 @@ static const struct drm_display_mode reported_modes[] = { | |||
1232 | * \return false if TV is disconnected. | 1232 | * \return false if TV is disconnected. |
1233 | */ | 1233 | */ |
1234 | static int | 1234 | static int |
1235 | intel_tv_detect_type (struct intel_tv *intel_tv, | 1235 | intel_tv_detect_type(struct intel_tv *intel_tv, |
1236 | struct drm_connector *connector) | 1236 | struct drm_connector *connector) |
1237 | { | 1237 | { |
1238 | struct drm_encoder *encoder = &intel_tv->base.base; | 1238 | struct drm_encoder *encoder = &intel_tv->base.base; |
@@ -1486,7 +1486,7 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1486 | } | 1486 | } |
1487 | 1487 | ||
1488 | static void | 1488 | static void |
1489 | intel_tv_destroy (struct drm_connector *connector) | 1489 | intel_tv_destroy(struct drm_connector *connector) |
1490 | { | 1490 | { |
1491 | drm_sysfs_connector_remove(connector); | 1491 | drm_sysfs_connector_remove(connector); |
1492 | drm_connector_cleanup(connector); | 1492 | drm_connector_cleanup(connector); |
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 0583677e4581..35ef5b1e3566 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
@@ -21,16 +21,17 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
21 | nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ | 21 | nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ |
22 | nv84_crypt.o \ | 22 | nv84_crypt.o \ |
23 | nva3_copy.o nvc0_copy.o \ | 23 | nva3_copy.o nvc0_copy.o \ |
24 | nv40_mpeg.o nv50_mpeg.o \ | 24 | nv31_mpeg.o nv50_mpeg.o \ |
25 | nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ | 25 | nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ |
26 | nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ | ||
27 | nv50_cursor.o nv50_display.o \ | ||
28 | nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ | 26 | nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ |
29 | nv04_crtc.o nv04_display.o nv04_cursor.o \ | 27 | nv04_crtc.o nv04_display.o nv04_cursor.o \ |
28 | nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ | ||
29 | nv50_cursor.o nv50_display.o \ | ||
30 | nvd0_display.o \ | ||
30 | nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \ | 31 | nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \ |
31 | nv10_gpio.o nv50_gpio.o \ | 32 | nv10_gpio.o nv50_gpio.o \ |
32 | nv50_calc.o \ | 33 | nv50_calc.o \ |
33 | nv04_pm.o nv50_pm.o nva3_pm.o \ | 34 | nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \ |
34 | nv50_vram.o nvc0_vram.o \ | 35 | nv50_vram.o nvc0_vram.o \ |
35 | nv50_vm.o nvc0_vm.o | 36 | nv50_vm.o nvc0_vm.o |
36 | 37 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 00a55dfdba82..fa22b28e8777 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -37,8 +37,10 @@ | |||
37 | #include "nouveau_drv.h" | 37 | #include "nouveau_drv.h" |
38 | #include "nouveau_drm.h" | 38 | #include "nouveau_drm.h" |
39 | #include "nouveau_reg.h" | 39 | #include "nouveau_reg.h" |
40 | #include "nouveau_encoder.h" | ||
40 | 41 | ||
41 | static int nv40_get_intensity(struct backlight_device *bd) | 42 | static int |
43 | nv40_get_intensity(struct backlight_device *bd) | ||
42 | { | 44 | { |
43 | struct drm_device *dev = bl_get_data(bd); | 45 | struct drm_device *dev = bl_get_data(bd); |
44 | int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) | 46 | int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) |
@@ -47,7 +49,8 @@ static int nv40_get_intensity(struct backlight_device *bd) | |||
47 | return val; | 49 | return val; |
48 | } | 50 | } |
49 | 51 | ||
50 | static int nv40_set_intensity(struct backlight_device *bd) | 52 | static int |
53 | nv40_set_intensity(struct backlight_device *bd) | ||
51 | { | 54 | { |
52 | struct drm_device *dev = bl_get_data(bd); | 55 | struct drm_device *dev = bl_get_data(bd); |
53 | int val = bd->props.brightness; | 56 | int val = bd->props.brightness; |
@@ -65,30 +68,8 @@ static const struct backlight_ops nv40_bl_ops = { | |||
65 | .update_status = nv40_set_intensity, | 68 | .update_status = nv40_set_intensity, |
66 | }; | 69 | }; |
67 | 70 | ||
68 | static int nv50_get_intensity(struct backlight_device *bd) | 71 | static int |
69 | { | 72 | nv40_backlight_init(struct drm_connector *connector) |
70 | struct drm_device *dev = bl_get_data(bd); | ||
71 | |||
72 | return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT); | ||
73 | } | ||
74 | |||
75 | static int nv50_set_intensity(struct backlight_device *bd) | ||
76 | { | ||
77 | struct drm_device *dev = bl_get_data(bd); | ||
78 | int val = bd->props.brightness; | ||
79 | |||
80 | nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT, | ||
81 | val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static const struct backlight_ops nv50_bl_ops = { | ||
86 | .options = BL_CORE_SUSPENDRESUME, | ||
87 | .get_brightness = nv50_get_intensity, | ||
88 | .update_status = nv50_set_intensity, | ||
89 | }; | ||
90 | |||
91 | static int nouveau_nv40_backlight_init(struct drm_connector *connector) | ||
92 | { | 73 | { |
93 | struct drm_device *dev = connector->dev; | 74 | struct drm_device *dev = connector->dev; |
94 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 75 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -113,34 +94,129 @@ static int nouveau_nv40_backlight_init(struct drm_connector *connector) | |||
113 | return 0; | 94 | return 0; |
114 | } | 95 | } |
115 | 96 | ||
116 | static int nouveau_nv50_backlight_init(struct drm_connector *connector) | 97 | static int |
98 | nv50_get_intensity(struct backlight_device *bd) | ||
99 | { | ||
100 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | ||
101 | struct drm_device *dev = nv_encoder->base.base.dev; | ||
102 | int or = nv_encoder->or; | ||
103 | u32 div = 1025; | ||
104 | u32 val; | ||
105 | |||
106 | val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or)); | ||
107 | val &= NV50_PDISP_SOR_PWM_CTL_VAL; | ||
108 | return ((val * 100) + (div / 2)) / div; | ||
109 | } | ||
110 | |||
111 | static int | ||
112 | nv50_set_intensity(struct backlight_device *bd) | ||
113 | { | ||
114 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | ||
115 | struct drm_device *dev = nv_encoder->base.base.dev; | ||
116 | int or = nv_encoder->or; | ||
117 | u32 div = 1025; | ||
118 | u32 val = (bd->props.brightness * div) / 100; | ||
119 | |||
120 | nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), | ||
121 | NV50_PDISP_SOR_PWM_CTL_NEW | val); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static const struct backlight_ops nv50_bl_ops = { | ||
126 | .options = BL_CORE_SUSPENDRESUME, | ||
127 | .get_brightness = nv50_get_intensity, | ||
128 | .update_status = nv50_set_intensity, | ||
129 | }; | ||
130 | |||
131 | static int | ||
132 | nva3_get_intensity(struct backlight_device *bd) | ||
133 | { | ||
134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | ||
135 | struct drm_device *dev = nv_encoder->base.base.dev; | ||
136 | int or = nv_encoder->or; | ||
137 | u32 div, val; | ||
138 | |||
139 | div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or)); | ||
140 | val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or)); | ||
141 | val &= NVA3_PDISP_SOR_PWM_CTL_VAL; | ||
142 | if (div && div >= val) | ||
143 | return ((val * 100) + (div / 2)) / div; | ||
144 | |||
145 | return 100; | ||
146 | } | ||
147 | |||
148 | static int | ||
149 | nva3_set_intensity(struct backlight_device *bd) | ||
150 | { | ||
151 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | ||
152 | struct drm_device *dev = nv_encoder->base.base.dev; | ||
153 | int or = nv_encoder->or; | ||
154 | u32 div, val; | ||
155 | |||
156 | div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or)); | ||
157 | val = (bd->props.brightness * div) / 100; | ||
158 | if (div) { | ||
159 | nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val | | ||
160 | NV50_PDISP_SOR_PWM_CTL_NEW | | ||
161 | NVA3_PDISP_SOR_PWM_CTL_UNK); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | return -EINVAL; | ||
166 | } | ||
167 | |||
168 | static const struct backlight_ops nva3_bl_ops = { | ||
169 | .options = BL_CORE_SUSPENDRESUME, | ||
170 | .get_brightness = nva3_get_intensity, | ||
171 | .update_status = nva3_set_intensity, | ||
172 | }; | ||
173 | |||
174 | static int | ||
175 | nv50_backlight_init(struct drm_connector *connector) | ||
117 | { | 176 | { |
118 | struct drm_device *dev = connector->dev; | 177 | struct drm_device *dev = connector->dev; |
119 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 178 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
179 | struct nouveau_encoder *nv_encoder; | ||
120 | struct backlight_properties props; | 180 | struct backlight_properties props; |
121 | struct backlight_device *bd; | 181 | struct backlight_device *bd; |
182 | const struct backlight_ops *ops; | ||
183 | |||
184 | nv_encoder = find_encoder(connector, OUTPUT_LVDS); | ||
185 | if (!nv_encoder) { | ||
186 | nv_encoder = find_encoder(connector, OUTPUT_DP); | ||
187 | if (!nv_encoder) | ||
188 | return -ENODEV; | ||
189 | } | ||
122 | 190 | ||
123 | if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) | 191 | if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) |
124 | return 0; | 192 | return 0; |
125 | 193 | ||
194 | if (dev_priv->chipset <= 0xa0 || | ||
195 | dev_priv->chipset == 0xaa || | ||
196 | dev_priv->chipset == 0xac) | ||
197 | ops = &nv50_bl_ops; | ||
198 | else | ||
199 | ops = &nva3_bl_ops; | ||
200 | |||
126 | memset(&props, 0, sizeof(struct backlight_properties)); | 201 | memset(&props, 0, sizeof(struct backlight_properties)); |
127 | props.type = BACKLIGHT_RAW; | 202 | props.type = BACKLIGHT_RAW; |
128 | props.max_brightness = 1025; | 203 | props.max_brightness = 100; |
129 | bd = backlight_device_register("nv_backlight", &connector->kdev, dev, | 204 | bd = backlight_device_register("nv_backlight", &connector->kdev, |
130 | &nv50_bl_ops, &props); | 205 | nv_encoder, ops, &props); |
131 | if (IS_ERR(bd)) | 206 | if (IS_ERR(bd)) |
132 | return PTR_ERR(bd); | 207 | return PTR_ERR(bd); |
133 | 208 | ||
134 | dev_priv->backlight = bd; | 209 | dev_priv->backlight = bd; |
135 | bd->props.brightness = nv50_get_intensity(bd); | 210 | bd->props.brightness = bd->ops->get_brightness(bd); |
136 | backlight_update_status(bd); | 211 | backlight_update_status(bd); |
137 | return 0; | 212 | return 0; |
138 | } | 213 | } |
139 | 214 | ||
140 | int nouveau_backlight_init(struct drm_connector *connector) | 215 | int |
216 | nouveau_backlight_init(struct drm_device *dev) | ||
141 | { | 217 | { |
142 | struct drm_device *dev = connector->dev; | ||
143 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 218 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
219 | struct drm_connector *connector; | ||
144 | 220 | ||
145 | #ifdef CONFIG_ACPI | 221 | #ifdef CONFIG_ACPI |
146 | if (acpi_video_backlight_support()) { | 222 | if (acpi_video_backlight_support()) { |
@@ -150,21 +226,28 @@ int nouveau_backlight_init(struct drm_connector *connector) | |||
150 | } | 226 | } |
151 | #endif | 227 | #endif |
152 | 228 | ||
153 | switch (dev_priv->card_type) { | 229 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
154 | case NV_40: | 230 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && |
155 | return nouveau_nv40_backlight_init(connector); | 231 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) |
156 | case NV_50: | 232 | continue; |
157 | return nouveau_nv50_backlight_init(connector); | 233 | |
158 | default: | 234 | switch (dev_priv->card_type) { |
159 | break; | 235 | case NV_40: |
236 | return nv40_backlight_init(connector); | ||
237 | case NV_50: | ||
238 | return nv50_backlight_init(connector); | ||
239 | default: | ||
240 | break; | ||
241 | } | ||
160 | } | 242 | } |
161 | 243 | ||
244 | |||
162 | return 0; | 245 | return 0; |
163 | } | 246 | } |
164 | 247 | ||
165 | void nouveau_backlight_exit(struct drm_connector *connector) | 248 | void |
249 | nouveau_backlight_exit(struct drm_device *dev) | ||
166 | { | 250 | { |
167 | struct drm_device *dev = connector->dev; | ||
168 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 251 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
169 | 252 | ||
170 | if (dev_priv->backlight) { | 253 | if (dev_priv->backlight) { |
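In the reworked backlight code above, nv50_get_intensity()/nv50_set_intensity() and the nva3 variants expose brightness as 0..100 and convert to and from the SOR PWM duty register, rounding to nearest on the read path via the + (div / 2) term (nv50 uses a fixed divider of 1025; nva3 reads it from NV50_PDISP_SOR_PWM_DIV). A self-contained sketch of just that arithmetic, with register access left out:

#include <stdio.h>
#include <stdint.h>

#define NV50_PWM_DIV 1025u      /* fixed divider on nv50 in the hunk above */

/* duty register value -> percent, rounded to nearest */
static unsigned int duty_to_percent(uint32_t val, uint32_t div)
{
        return (val * 100 + div / 2) / div;
}

/* percent -> duty register value (truncating, as the driver does) */
static uint32_t percent_to_duty(unsigned int pct, uint32_t div)
{
        return (pct * div) / 100;
}

int main(void)
{
        uint32_t duty = percent_to_duty(60, NV50_PWM_DIV);

        printf("60%% -> duty %u -> %u%%\n",
               duty, duty_to_percent(duty, NV50_PWM_DIV));
        return 0;
}

Round-tripping 60% gives duty 615 and reads back as 60%, which is why max_brightness could drop from the raw 1025 to 100 without visible quantization problems.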
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index b311faba34f8..032a82098136 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -296,6 +296,11 @@ munge_reg(struct nvbios *bios, uint32_t reg) | |||
296 | if (dev_priv->card_type < NV_50) | 296 | if (dev_priv->card_type < NV_50) |
297 | return reg; | 297 | return reg; |
298 | 298 | ||
299 | if (reg & 0x80000000) { | ||
300 | BUG_ON(bios->display.crtc < 0); | ||
301 | reg += bios->display.crtc * 0x800; | ||
302 | } | ||
303 | |||
299 | if (reg & 0x40000000) { | 304 | if (reg & 0x40000000) { |
300 | BUG_ON(!dcbent); | 305 | BUG_ON(!dcbent); |
301 | 306 | ||
@@ -304,7 +309,7 @@ munge_reg(struct nvbios *bios, uint32_t reg) | |||
304 | reg += 0x00000080; | 309 | reg += 0x00000080; |
305 | } | 310 | } |
306 | 311 | ||
307 | reg &= ~0x60000000; | 312 | reg &= ~0xe0000000; |
308 | return reg; | 313 | return reg; |
309 | } | 314 | } |
310 | 315 | ||
@@ -1174,22 +1179,19 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
1174 | * | 1179 | * |
1175 | */ | 1180 | */ |
1176 | 1181 | ||
1177 | struct bit_displayport_encoder_table *dpe = NULL; | ||
1178 | struct dcb_entry *dcb = bios->display.output; | 1182 | struct dcb_entry *dcb = bios->display.output; |
1179 | struct drm_device *dev = bios->dev; | 1183 | struct drm_device *dev = bios->dev; |
1180 | uint8_t cond = bios->data[offset + 1]; | 1184 | uint8_t cond = bios->data[offset + 1]; |
1181 | int dummy; | 1185 | uint8_t *table, *entry; |
1182 | 1186 | ||
1183 | BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); | 1187 | BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); |
1184 | 1188 | ||
1185 | if (!iexec->execute) | 1189 | if (!iexec->execute) |
1186 | return 3; | 1190 | return 3; |
1187 | 1191 | ||
1188 | dpe = nouveau_bios_dp_table(dev, dcb, &dummy); | 1192 | table = nouveau_dp_bios_data(dev, dcb, &entry); |
1189 | if (!dpe) { | 1193 | if (!table) |
1190 | NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); | ||
1191 | return 3; | 1194 | return 3; |
1192 | } | ||
1193 | 1195 | ||
1194 | switch (cond) { | 1196 | switch (cond) { |
1195 | case 0: | 1197 | case 0: |
@@ -1203,7 +1205,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
1203 | break; | 1205 | break; |
1204 | case 1: | 1206 | case 1: |
1205 | case 2: | 1207 | case 2: |
1206 | if (!(dpe->unknown & cond)) | 1208 | if (!(entry[5] & cond)) |
1207 | iexec->execute = false; | 1209 | iexec->execute = false; |
1208 | break; | 1210 | break; |
1209 | case 5: | 1211 | case 5: |
@@ -3221,6 +3223,49 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
3221 | return 1; | 3223 | return 1; |
3222 | } | 3224 | } |
3223 | 3225 | ||
3226 | static void | ||
3227 | init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio) | ||
3228 | { | ||
3229 | const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; | ||
3230 | u32 r, s, v; | ||
3231 | |||
3232 | /* Not a clue, needs de-magicing */ | ||
3233 | r = nv50_gpio_ctl[gpio->line >> 4]; | ||
3234 | s = (gpio->line & 0x0f); | ||
3235 | v = bios_rd32(bios, r) & ~(0x00010001 << s); | ||
3236 | switch ((gpio->entry & 0x06000000) >> 25) { | ||
3237 | case 1: | ||
3238 | v |= (0x00000001 << s); | ||
3239 | break; | ||
3240 | case 2: | ||
3241 | v |= (0x00010000 << s); | ||
3242 | break; | ||
3243 | default: | ||
3244 | break; | ||
3245 | } | ||
3246 | |||
3247 | bios_wr32(bios, r, v); | ||
3248 | } | ||
3249 | |||
3250 | static void | ||
3251 | init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio) | ||
3252 | { | ||
3253 | u32 v, i; | ||
3254 | |||
3255 | v = bios_rd32(bios, 0x00d610 + (gpio->line * 4)); | ||
3256 | v &= 0xffffff00; | ||
3257 | v |= (gpio->entry & 0x00ff0000) >> 16; | ||
3258 | bios_wr32(bios, 0x00d610 + (gpio->line * 4), v); | ||
3259 | |||
3260 | i = (gpio->entry & 0x1f000000) >> 24; | ||
3261 | if (i) { | ||
3262 | v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4)); | ||
3263 | v &= 0xffffff00; | ||
3264 | v |= gpio->line; | ||
3265 | bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v); | ||
3266 | } | ||
3267 | } | ||
3268 | |||
3224 | static int | 3269 | static int |
3225 | init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 3270 | init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
3226 | { | 3271 | { |
@@ -3235,7 +3280,6 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
3235 | 3280 | ||
3236 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; | 3281 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; |
3237 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | 3282 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; |
3238 | const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; | ||
3239 | int i; | 3283 | int i; |
3240 | 3284 | ||
3241 | if (dev_priv->card_type < NV_50) { | 3285 | if (dev_priv->card_type < NV_50) { |
@@ -3248,33 +3292,20 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
3248 | 3292 | ||
3249 | for (i = 0; i < bios->dcb.gpio.entries; i++) { | 3293 | for (i = 0; i < bios->dcb.gpio.entries; i++) { |
3250 | struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; | 3294 | struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; |
3251 | uint32_t r, s, v; | ||
3252 | 3295 | ||
3253 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); | 3296 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); |
3254 | 3297 | ||
3255 | BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", | 3298 | BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", |
3256 | offset, gpio->tag, gpio->state_default); | 3299 | offset, gpio->tag, gpio->state_default); |
3257 | if (bios->execute) | ||
3258 | pgpio->set(bios->dev, gpio->tag, gpio->state_default); | ||
3259 | 3300 | ||
3260 | /* The NVIDIA binary driver doesn't appear to actually do | 3301 | if (!bios->execute) |
3261 | * any of this, my VBIOS does however. | 3302 | continue; |
3262 | */ | 3303 | |
3263 | /* Not a clue, needs de-magicing */ | 3304 | pgpio->set(bios->dev, gpio->tag, gpio->state_default); |
3264 | r = nv50_gpio_ctl[gpio->line >> 4]; | 3305 | if (dev_priv->card_type < NV_D0) |
3265 | s = (gpio->line & 0x0f); | 3306 | init_gpio_unknv50(bios, gpio); |
3266 | v = bios_rd32(bios, r) & ~(0x00010001 << s); | 3307 | else |
3267 | switch ((gpio->entry & 0x06000000) >> 25) { | 3308 | init_gpio_unknvd0(bios, gpio); |
3268 | case 1: | ||
3269 | v |= (0x00000001 << s); | ||
3270 | break; | ||
3271 | case 2: | ||
3272 | v |= (0x00010000 << s); | ||
3273 | break; | ||
3274 | default: | ||
3275 | break; | ||
3276 | } | ||
3277 | bios_wr32(bios, r, v); | ||
3278 | } | 3309 | } |
3279 | 3310 | ||
3280 | return 1; | 3311 | return 1; |
@@ -3737,6 +3768,10 @@ parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
3737 | int count = 0, i, ret; | 3768 | int count = 0, i, ret; |
3738 | uint8_t id; | 3769 | uint8_t id; |
3739 | 3770 | ||
3771 | /* catch NULL script pointers */ | ||
3772 | if (offset == 0) | ||
3773 | return 0; | ||
3774 | |||
3740 | /* | 3775 | /* |
3741 | * Loop until INIT_DONE causes us to break out of the loop | 3776 | * Loop until INIT_DONE causes us to break out of the loop |
3742 | * (or until offset > bios length just in case... ) | 3777 | * (or until offset > bios length just in case... ) |
@@ -4389,86 +4424,37 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b | |||
4389 | return 0; | 4424 | return 0; |
4390 | } | 4425 | } |
4391 | 4426 | ||
4392 | static uint8_t * | 4427 | /* BIT 'U'/'d' table encoder subtables have hashes matching them to |
4393 | bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, | 4428 | * a particular set of encoders. |
4394 | uint16_t record, int record_len, int record_nr, | 4429 | * |
4395 | bool match_link) | 4430 | * This function returns true if a particular DCB entry matches. |
4431 | */ | ||
4432 | bool | ||
4433 | bios_encoder_match(struct dcb_entry *dcb, u32 hash) | ||
4396 | { | 4434 | { |
4397 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 4435 | if ((hash & 0x000000f0) != (dcb->location << 4)) |
4398 | struct nvbios *bios = &dev_priv->vbios; | 4436 | return false; |
4399 | uint32_t entry; | 4437 | if ((hash & 0x0000000f) != dcb->type) |
4400 | uint16_t table; | 4438 | return false; |
4401 | int i, v; | 4439 | if (!(hash & (dcb->or << 16))) |
4440 | return false; | ||
4402 | 4441 | ||
4403 | switch (dcbent->type) { | 4442 | switch (dcb->type) { |
4404 | case OUTPUT_TMDS: | 4443 | case OUTPUT_TMDS: |
4405 | case OUTPUT_LVDS: | 4444 | case OUTPUT_LVDS: |
4406 | case OUTPUT_DP: | 4445 | case OUTPUT_DP: |
4407 | break; | 4446 | if (hash & 0x00c00000) { |
4408 | default: | 4447 | if (!(hash & (dcb->sorconf.link << 22))) |
4409 | match_link = false; | 4448 | return false; |
4410 | break; | ||
4411 | } | ||
4412 | |||
4413 | for (i = 0; i < record_nr; i++, record += record_len) { | ||
4414 | table = ROM16(bios->data[record]); | ||
4415 | if (!table) | ||
4416 | continue; | ||
4417 | entry = ROM32(bios->data[table]); | ||
4418 | |||
4419 | if (match_link) { | ||
4420 | v = (entry & 0x00c00000) >> 22; | ||
4421 | if (!(v & dcbent->sorconf.link)) | ||
4422 | continue; | ||
4423 | } | 4449 | } |
4424 | 4450 | default: | |
4425 | v = (entry & 0x000f0000) >> 16; | 4451 | return true; |
4426 | if (!(v & dcbent->or)) | ||
4427 | continue; | ||
4428 | |||
4429 | v = (entry & 0x000000f0) >> 4; | ||
4430 | if (v != dcbent->location) | ||
4431 | continue; | ||
4432 | |||
4433 | v = (entry & 0x0000000f); | ||
4434 | if (v != dcbent->type) | ||
4435 | continue; | ||
4436 | |||
4437 | return &bios->data[table]; | ||
4438 | } | ||
4439 | |||
4440 | return NULL; | ||
4441 | } | ||
4442 | |||
4443 | void * | ||
4444 | nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, | ||
4445 | int *length) | ||
4446 | { | ||
4447 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
4448 | struct nvbios *bios = &dev_priv->vbios; | ||
4449 | uint8_t *table; | ||
4450 | |||
4451 | if (!bios->display.dp_table_ptr) { | ||
4452 | NV_ERROR(dev, "No pointer to DisplayPort table\n"); | ||
4453 | return NULL; | ||
4454 | } | ||
4455 | table = &bios->data[bios->display.dp_table_ptr]; | ||
4456 | |||
4457 | if (table[0] != 0x20 && table[0] != 0x21) { | ||
4458 | NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n", | ||
4459 | table[0]); | ||
4460 | return NULL; | ||
4461 | } | 4452 | } |
4462 | |||
4463 | *length = table[4]; | ||
4464 | return bios_output_config_match(dev, dcbent, | ||
4465 | bios->display.dp_table_ptr + table[1], | ||
4466 | table[2], table[3], table[0] >= 0x21); | ||
4467 | } | 4453 | } |
4468 | 4454 | ||
4469 | int | 4455 | int |
4470 | nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | 4456 | nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk, |
4471 | uint32_t sub, int pxclk) | 4457 | struct dcb_entry *dcbent, int crtc) |
4472 | { | 4458 | { |
4473 | /* | 4459 | /* |
4474 | * The display script table is located by the BIT 'U' table. | 4460 | * The display script table is located by the BIT 'U' table. |
@@ -4498,7 +4484,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4498 | uint8_t *table = &bios->data[bios->display.script_table_ptr]; | 4484 | uint8_t *table = &bios->data[bios->display.script_table_ptr]; |
4499 | uint8_t *otable = NULL; | 4485 | uint8_t *otable = NULL; |
4500 | uint16_t script; | 4486 | uint16_t script; |
4501 | int i = 0; | 4487 | int i; |
4502 | 4488 | ||
4503 | if (!bios->display.script_table_ptr) { | 4489 | if (!bios->display.script_table_ptr) { |
4504 | NV_ERROR(dev, "No pointer to output script table\n"); | 4490 | NV_ERROR(dev, "No pointer to output script table\n"); |
@@ -4550,30 +4536,33 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4550 | 4536 | ||
4551 | NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n", | 4537 | NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n", |
4552 | dcbent->type, dcbent->location, dcbent->or); | 4538 | dcbent->type, dcbent->location, dcbent->or); |
4553 | otable = bios_output_config_match(dev, dcbent, table[1] + | 4539 | for (i = 0; i < table[3]; i++) { |
4554 | bios->display.script_table_ptr, | 4540 | otable = ROMPTR(bios, table[table[1] + (i * table[2])]); |
4555 | table[2], table[3], table[0] >= 0x21); | 4541 | if (otable && bios_encoder_match(dcbent, ROM32(otable[0]))) |
4542 | break; | ||
4543 | } | ||
4544 | |||
4556 | if (!otable) { | 4545 | if (!otable) { |
4557 | NV_DEBUG_KMS(dev, "failed to match any output table\n"); | 4546 | NV_DEBUG_KMS(dev, "failed to match any output table\n"); |
4558 | return 1; | 4547 | return 1; |
4559 | } | 4548 | } |
4560 | 4549 | ||
4561 | if (pxclk < -2 || pxclk > 0) { | 4550 | if (pclk < -2 || pclk > 0) { |
4562 | /* Try to find matching script table entry */ | 4551 | /* Try to find matching script table entry */ |
4563 | for (i = 0; i < otable[5]; i++) { | 4552 | for (i = 0; i < otable[5]; i++) { |
4564 | if (ROM16(otable[table[4] + i*6]) == sub) | 4553 | if (ROM16(otable[table[4] + i*6]) == type) |
4565 | break; | 4554 | break; |
4566 | } | 4555 | } |
4567 | 4556 | ||
4568 | if (i == otable[5]) { | 4557 | if (i == otable[5]) { |
4569 | NV_ERROR(dev, "Table 0x%04x not found for %d/%d, " | 4558 | NV_ERROR(dev, "Table 0x%04x not found for %d/%d, " |
4570 | "using first\n", | 4559 | "using first\n", |
4571 | sub, dcbent->type, dcbent->or); | 4560 | type, dcbent->type, dcbent->or); |
4572 | i = 0; | 4561 | i = 0; |
4573 | } | 4562 | } |
4574 | } | 4563 | } |
4575 | 4564 | ||
4576 | if (pxclk == 0) { | 4565 | if (pclk == 0) { |
4577 | script = ROM16(otable[6]); | 4566 | script = ROM16(otable[6]); |
4578 | if (!script) { | 4567 | if (!script) { |
4579 | NV_DEBUG_KMS(dev, "output script 0 not found\n"); | 4568 | NV_DEBUG_KMS(dev, "output script 0 not found\n"); |
@@ -4581,9 +4570,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4581 | } | 4570 | } |
4582 | 4571 | ||
4583 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); | 4572 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); |
4584 | nouveau_bios_run_init_table(dev, script, dcbent); | 4573 | nouveau_bios_run_init_table(dev, script, dcbent, crtc); |
4585 | } else | 4574 | } else |
4586 | if (pxclk == -1) { | 4575 | if (pclk == -1) { |
4587 | script = ROM16(otable[8]); | 4576 | script = ROM16(otable[8]); |
4588 | if (!script) { | 4577 | if (!script) { |
4589 | NV_DEBUG_KMS(dev, "output script 1 not found\n"); | 4578 | NV_DEBUG_KMS(dev, "output script 1 not found\n"); |
@@ -4591,9 +4580,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4591 | } | 4580 | } |
4592 | 4581 | ||
4593 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); | 4582 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); |
4594 | nouveau_bios_run_init_table(dev, script, dcbent); | 4583 | nouveau_bios_run_init_table(dev, script, dcbent, crtc); |
4595 | } else | 4584 | } else |
4596 | if (pxclk == -2) { | 4585 | if (pclk == -2) { |
4597 | if (table[4] >= 12) | 4586 | if (table[4] >= 12) |
4598 | script = ROM16(otable[10]); | 4587 | script = ROM16(otable[10]); |
4599 | else | 4588 | else |
@@ -4604,31 +4593,31 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4604 | } | 4593 | } |
4605 | 4594 | ||
4606 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); | 4595 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); |
4607 | nouveau_bios_run_init_table(dev, script, dcbent); | 4596 | nouveau_bios_run_init_table(dev, script, dcbent, crtc); |
4608 | } else | 4597 | } else |
4609 | if (pxclk > 0) { | 4598 | if (pclk > 0) { |
4610 | script = ROM16(otable[table[4] + i*6 + 2]); | 4599 | script = ROM16(otable[table[4] + i*6 + 2]); |
4611 | if (script) | 4600 | if (script) |
4612 | script = clkcmptable(bios, script, pxclk); | 4601 | script = clkcmptable(bios, script, pclk); |
4613 | if (!script) { | 4602 | if (!script) { |
4614 | NV_DEBUG_KMS(dev, "clock script 0 not found\n"); | 4603 | NV_DEBUG_KMS(dev, "clock script 0 not found\n"); |
4615 | return 1; | 4604 | return 1; |
4616 | } | 4605 | } |
4617 | 4606 | ||
4618 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); | 4607 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); |
4619 | nouveau_bios_run_init_table(dev, script, dcbent); | 4608 | nouveau_bios_run_init_table(dev, script, dcbent, crtc); |
4620 | } else | 4609 | } else |
4621 | if (pxclk < 0) { | 4610 | if (pclk < 0) { |
4622 | script = ROM16(otable[table[4] + i*6 + 4]); | 4611 | script = ROM16(otable[table[4] + i*6 + 4]); |
4623 | if (script) | 4612 | if (script) |
4624 | script = clkcmptable(bios, script, -pxclk); | 4613 | script = clkcmptable(bios, script, -pclk); |
4625 | if (!script) { | 4614 | if (!script) { |
4626 | NV_DEBUG_KMS(dev, "clock script 1 not found\n"); | 4615 | NV_DEBUG_KMS(dev, "clock script 1 not found\n"); |
4627 | return 1; | 4616 | return 1; |
4628 | } | 4617 | } |
4629 | 4618 | ||
4630 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); | 4619 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); |
4631 | nouveau_bios_run_init_table(dev, script, dcbent); | 4620 | nouveau_bios_run_init_table(dev, script, dcbent, crtc); |
4632 | } | 4621 | } |
4633 | 4622 | ||
4634 | return 0; | 4623 | return 0; |
@@ -5478,14 +5467,6 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios, | |||
5478 | return 0; | 5467 | return 0; |
5479 | } | 5468 | } |
5480 | 5469 | ||
5481 | static int | ||
5482 | parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios, | ||
5483 | struct bit_entry *bitentry) | ||
5484 | { | ||
5485 | bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]); | ||
5486 | return 0; | ||
5487 | } | ||
5488 | |||
5489 | struct bit_table { | 5470 | struct bit_table { |
5490 | const char id; | 5471 | const char id; |
5491 | int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); | 5472 | int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); |
@@ -5559,7 +5540,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset) | |||
5559 | parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); | 5540 | parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); |
5560 | parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); | 5541 | parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); |
5561 | parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U)); | 5542 | parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U)); |
5562 | parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport)); | ||
5563 | 5543 | ||
5564 | return 0; | 5544 | return 0; |
5565 | } | 5545 | } |
@@ -5884,9 +5864,15 @@ parse_dcb_gpio_table(struct nvbios *bios) | |||
5884 | } | 5864 | } |
5885 | 5865 | ||
5886 | e->line = (e->entry & 0x0000001f) >> 0; | 5866 | e->line = (e->entry & 0x0000001f) >> 0; |
5887 | e->state_default = (e->entry & 0x01000000) >> 24; | 5867 | if (gpio[0] == 0x40) { |
5888 | e->state[0] = (e->entry & 0x18000000) >> 27; | 5868 | e->state_default = (e->entry & 0x01000000) >> 24; |
5889 | e->state[1] = (e->entry & 0x60000000) >> 29; | 5869 | e->state[0] = (e->entry & 0x18000000) >> 27; |
5870 | e->state[1] = (e->entry & 0x60000000) >> 29; | ||
5871 | } else { | ||
5872 | e->state_default = (e->entry & 0x00000080) >> 7; | ||
5873 | e->state[0] = (entry[4] >> 4) & 3; | ||
5874 | e->state[1] = (entry[4] >> 6) & 3; | ||
5875 | } | ||
5890 | } | 5876 | } |
5891 | } | 5877 | } |
5892 | 5878 | ||
@@ -6156,7 +6142,14 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
6156 | } | 6142 | } |
6157 | case OUTPUT_DP: | 6143 | case OUTPUT_DP: |
6158 | entry->dpconf.sor.link = (conf & 0x00000030) >> 4; | 6144 | entry->dpconf.sor.link = (conf & 0x00000030) >> 4; |
6159 | entry->dpconf.link_bw = (conf & 0x00e00000) >> 21; | 6145 | switch ((conf & 0x00e00000) >> 21) { |
6146 | case 0: | ||
6147 | entry->dpconf.link_bw = 162000; | ||
6148 | break; | ||
6149 | default: | ||
6150 | entry->dpconf.link_bw = 270000; | ||
6151 | break; | ||
6152 | } | ||
6160 | switch ((conf & 0x0f000000) >> 24) { | 6153 | switch ((conf & 0x0f000000) >> 24) { |
6161 | case 0xf: | 6154 | case 0xf: |
6162 | entry->dpconf.link_nr = 4; | 6155 | entry->dpconf.link_nr = 4; |
@@ -6769,7 +6762,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) | |||
6769 | 6762 | ||
6770 | void | 6763 | void |
6771 | nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, | 6764 | nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, |
6772 | struct dcb_entry *dcbent) | 6765 | struct dcb_entry *dcbent, int crtc) |
6773 | { | 6766 | { |
6774 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 6767 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
6775 | struct nvbios *bios = &dev_priv->vbios; | 6768 | struct nvbios *bios = &dev_priv->vbios; |
@@ -6777,11 +6770,22 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, | |||
6777 | 6770 | ||
6778 | spin_lock_bh(&bios->lock); | 6771 | spin_lock_bh(&bios->lock); |
6779 | bios->display.output = dcbent; | 6772 | bios->display.output = dcbent; |
6773 | bios->display.crtc = crtc; | ||
6780 | parse_init_table(bios, table, &iexec); | 6774 | parse_init_table(bios, table, &iexec); |
6781 | bios->display.output = NULL; | 6775 | bios->display.output = NULL; |
6782 | spin_unlock_bh(&bios->lock); | 6776 | spin_unlock_bh(&bios->lock); |
6783 | } | 6777 | } |
6784 | 6778 | ||
6779 | void | ||
6780 | nouveau_bios_init_exec(struct drm_device *dev, uint16_t table) | ||
6781 | { | ||
6782 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
6783 | struct nvbios *bios = &dev_priv->vbios; | ||
6784 | struct init_exec iexec = { true, false }; | ||
6785 | |||
6786 | parse_init_table(bios, table, &iexec); | ||
6787 | } | ||
6788 | |||
6785 | static bool NVInitVBIOS(struct drm_device *dev) | 6789 | static bool NVInitVBIOS(struct drm_device *dev) |
6786 | { | 6790 | { |
6787 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 6791 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -6863,9 +6867,8 @@ nouveau_run_vbios_init(struct drm_device *dev) | |||
6863 | 6867 | ||
6864 | if (dev_priv->card_type >= NV_50) { | 6868 | if (dev_priv->card_type >= NV_50) { |
6865 | for (i = 0; i < bios->dcb.entries; i++) { | 6869 | for (i = 0; i < bios->dcb.entries; i++) { |
6866 | nouveau_bios_run_display_table(dev, | 6870 | nouveau_bios_run_display_table(dev, 0, 0, |
6867 | &bios->dcb.entry[i], | 6871 | &bios->dcb.entry[i], -1); |
6868 | 0, 0); | ||
6869 | } | 6872 | } |
6870 | } | 6873 | } |
6871 | 6874 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 050c314119df..8adb69e4a6b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -289,8 +289,8 @@ struct nvbios { | |||
289 | 289 | ||
290 | struct { | 290 | struct { |
291 | struct dcb_entry *output; | 291 | struct dcb_entry *output; |
292 | int crtc; | ||
292 | uint16_t script_table_ptr; | 293 | uint16_t script_table_ptr; |
293 | uint16_t dp_table_ptr; | ||
294 | } display; | 294 | } display; |
295 | 295 | ||
296 | struct { | 296 | struct { |
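The new display.crtc field stored here is consumed by the munge_reg() hunk near the top of the nouveau_bios.c diff: init-script register addresses with bit 31 set are rebased per CRTC in 0x800 strides, and the mask widens from ~0x60000000 to ~0xe0000000 so the new flag bit is stripped as well. Roughly (the per-OR bit-30 arm is more involved and omitted here):

#include <stdint.h>
#include <stdio.h>

/* bit 31: rebase per CRTC; bit 30: per OR (omitted); strip flags after */
static uint32_t munge_reg(uint32_t reg, int crtc)
{
        if (reg & 0x80000000)
                reg += (uint32_t)crtc * 0x800;
        return reg & ~0xe0000000;
}

int main(void)
{
        /* a script register tagged per-CRTC, executed for crtc 1 */
        printf("0x%08x\n", munge_reg(0x80616100, 1));
        return 0;
}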
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 890d50e4d682..7226f419e178 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -956,7 +956,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
956 | break; | 956 | break; |
957 | } | 957 | } |
958 | 958 | ||
959 | if (dev_priv->card_type == NV_C0) | 959 | if (dev_priv->card_type >= NV_C0) |
960 | page_shift = node->page_shift; | 960 | page_shift = node->page_shift; |
961 | else | 961 | else |
962 | page_shift = 12; | 962 | page_shift = 12; |
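The one-line nouveau_bo.c change makes every NV_C0-and-later card take the memory node's page shift, where previously only an exact NV_C0 match did; older cards keep 4 KiB pages (shift 12). As a sketch, assuming illustrative card-type constants:

#include <stdio.h>

enum nv_card_type { NV_50 = 0x50, NV_C0 = 0xc0, NV_D0 = 0xd0 };

static int io_page_shift(enum nv_card_type card, int node_page_shift)
{
        /* NV_C0 and later honour the node's shift; older cards map 4KiB */
        return card >= NV_C0 ? node_page_shift : 12;
}

int main(void)
{
        printf("NV_D0: %d, NV_50: %d\n",
               io_page_shift(NV_D0, 17), io_page_shift(NV_50, 17));
        return 0;
}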
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index b0d753f45bbd..a319d5646ea9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -411,13 +411,17 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, | |||
411 | return ret; | 411 | return ret; |
412 | init->channel = chan->id; | 412 | init->channel = chan->id; |
413 | 413 | ||
414 | if (chan->dma.ib_max) | 414 | if (nouveau_vram_pushbuf == 0) { |
415 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | | 415 | if (chan->dma.ib_max) |
416 | NOUVEAU_GEM_DOMAIN_GART; | 416 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | |
417 | else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) | 417 | NOUVEAU_GEM_DOMAIN_GART; |
418 | else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) | ||
419 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; | ||
420 | else | ||
421 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; | ||
422 | } else { | ||
418 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; | 423 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; |
419 | else | 424 | } |
420 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; | ||
421 | 425 | ||
422 | if (dev_priv->card_type < NV_C0) { | 426 | if (dev_priv->card_type < NV_C0) { |
423 | init->subchan[0].handle = NvM2MF; | 427 | init->subchan[0].handle = NvM2MF; |
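The fifo_alloc hunk makes the nouveau_vram_pushbuf module option authoritative when reporting push-buffer placement to userspace: only when it is 0 does the answer depend on IB support or on where the buffer object actually sits. The decision restated, with the NOUVEAU_GEM_DOMAIN_* names shortened:

#include <stdio.h>

#define DOMAIN_VRAM 0x1
#define DOMAIN_GART 0x2

static unsigned int
pushbuf_domains(int vram_pushbuf, int ib_max, int bo_in_vram)
{
        if (vram_pushbuf)               /* forced by the module option */
                return DOMAIN_VRAM;
        if (ib_max)                     /* IB channels can use either */
                return DOMAIN_VRAM | DOMAIN_GART;
        return bo_in_vram ? DOMAIN_VRAM : DOMAIN_GART;
}

int main(void)
{
        printf("0x%x\n", pushbuf_domains(0, 1, 0));     /* -> 0x3 */
        return 0;
}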
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 939d4df07777..e0d275e1c96c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | static void nouveau_connector_hotplug(void *, int); | 40 | static void nouveau_connector_hotplug(void *, int); |
41 | 41 | ||
42 | static struct nouveau_encoder * | 42 | struct nouveau_encoder * |
43 | find_encoder(struct drm_connector *connector, int type) | 43 | find_encoder(struct drm_connector *connector, int type) |
44 | { | 44 | { |
45 | struct drm_device *dev = connector->dev; | 45 | struct drm_device *dev = connector->dev; |
@@ -116,10 +116,6 @@ nouveau_connector_destroy(struct drm_connector *connector) | |||
116 | nouveau_connector_hotplug, connector); | 116 | nouveau_connector_hotplug, connector); |
117 | } | 117 | } |
118 | 118 | ||
119 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || | ||
120 | connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
121 | nouveau_backlight_exit(connector); | ||
122 | |||
123 | kfree(nv_connector->edid); | 119 | kfree(nv_connector->edid); |
124 | drm_sysfs_connector_remove(connector); | 120 | drm_sysfs_connector_remove(connector); |
125 | drm_connector_cleanup(connector); | 121 | drm_connector_cleanup(connector); |
@@ -712,11 +708,8 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
712 | case OUTPUT_TV: | 708 | case OUTPUT_TV: |
713 | return get_slave_funcs(encoder)->mode_valid(encoder, mode); | 709 | return get_slave_funcs(encoder)->mode_valid(encoder, mode); |
714 | case OUTPUT_DP: | 710 | case OUTPUT_DP: |
715 | if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7) | 711 | max_clock = nv_encoder->dp.link_nr; |
716 | max_clock = nv_encoder->dp.link_nr * 270000; | 712 | max_clock *= nv_encoder->dp.link_bw; |
717 | else | ||
718 | max_clock = nv_encoder->dp.link_nr * 162000; | ||
719 | |||
720 | clock = clock * nouveau_connector_bpp(connector) / 8; | 713 | clock = clock * nouveau_connector_bpp(connector) / 8; |
721 | break; | 714 | break; |
722 | default: | 715 | default: |
@@ -871,7 +864,6 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
871 | dev->mode_config.scaling_mode_property, | 864 | dev->mode_config.scaling_mode_property, |
872 | nv_connector->scaling_mode); | 865 | nv_connector->scaling_mode); |
873 | } | 866 | } |
874 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
875 | /* fall-through */ | 867 | /* fall-through */ |
876 | case DCB_CONNECTOR_TV_0: | 868 | case DCB_CONNECTOR_TV_0: |
877 | case DCB_CONNECTOR_TV_1: | 869 | case DCB_CONNECTOR_TV_1: |
@@ -888,27 +880,20 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
888 | dev->mode_config.dithering_mode_property, | 880 | dev->mode_config.dithering_mode_property, |
889 | nv_connector->use_dithering ? | 881 | nv_connector->use_dithering ? |
890 | DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); | 882 | DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); |
891 | |||
892 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) { | ||
893 | if (dev_priv->card_type >= NV_50) | ||
894 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
895 | else | ||
896 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
897 | } | ||
898 | break; | 883 | break; |
899 | } | 884 | } |
900 | 885 | ||
901 | if (pgpio->irq_register) { | 886 | if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) { |
902 | pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, | 887 | pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, |
903 | nouveau_connector_hotplug, connector); | 888 | nouveau_connector_hotplug, connector); |
889 | |||
890 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
891 | } else { | ||
892 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
904 | } | 893 | } |
905 | 894 | ||
906 | drm_sysfs_connector_add(connector); | 895 | drm_sysfs_connector_add(connector); |
907 | 896 | ||
908 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || | ||
909 | connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
910 | nouveau_backlight_init(connector); | ||
911 | |||
912 | dcb->drm = connector; | 897 | dcb->drm = connector; |
913 | return dcb->drm; | 898 | return dcb->drm; |
914 | 899 | ||
@@ -925,22 +910,13 @@ nouveau_connector_hotplug(void *data, int plugged) | |||
925 | struct drm_connector *connector = data; | 910 | struct drm_connector *connector = data; |
926 | struct drm_device *dev = connector->dev; | 911 | struct drm_device *dev = connector->dev; |
927 | 912 | ||
928 | NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", | 913 | NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un", |
929 | drm_get_connector_name(connector)); | 914 | drm_get_connector_name(connector)); |
930 | |||
931 | if (connector->encoder && connector->encoder->crtc && | ||
932 | connector->encoder->crtc->enabled) { | ||
933 | struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder); | ||
934 | struct drm_encoder_helper_funcs *helper = | ||
935 | connector->encoder->helper_private; | ||
936 | 915 | ||
937 | if (nv_encoder->dcb->type == OUTPUT_DP) { | 916 | if (plugged) |
938 | if (plugged) | 917 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
939 | helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); | 918 | else |
940 | else | 919 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
941 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); | ||
942 | } | ||
943 | } | ||
944 | 920 | ||
945 | drm_helper_hpd_irq_event(dev); | 921 | drm_helper_hpd_irq_event(dev); |
946 | } | 922 | } |
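mode_valid()'s DP branch can now multiply link_nr by link_bw directly because the parse_dcb20_entry() hunk earlier stores link_bw in kHz (162000 or 270000) rather than the raw DPCD code. A worked sketch of the resulting bandwidth check (the function name and sample mode are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* can `clock` kHz of pixels at `bpp` fit on link_nr lanes of link_bw kHz? */
static bool dp_mode_fits(int clock, int link_nr, int link_bw, int bpp)
{
        int max_clock = link_nr * link_bw;

        return clock * bpp / 8 <= max_clock;
}

int main(void)
{
        /* ~154 MHz mode at 24bpp over 4 lanes of 2.7 GHz: 462000 <= 1080000 */
        printf("%s\n", dp_mode_fits(154000, 4, 270000, 24) ? "ok" : "no");
        return 0;
}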
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index cb1ce2a09162..bf8e1289953d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h | |||
@@ -82,14 +82,13 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | int nv50_crtc_create(struct drm_device *dev, int index); | 84 | int nv50_crtc_create(struct drm_device *dev, int index); |
85 | int nv50_cursor_init(struct nouveau_crtc *); | ||
86 | void nv50_cursor_fini(struct nouveau_crtc *); | ||
87 | int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv, | 85 | int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv, |
88 | uint32_t buffer_handle, uint32_t width, | 86 | uint32_t buffer_handle, uint32_t width, |
89 | uint32_t height); | 87 | uint32_t height); |
90 | int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y); | 88 | int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y); |
91 | 89 | ||
92 | int nv04_cursor_init(struct nouveau_crtc *); | 90 | int nv04_cursor_init(struct nouveau_crtc *); |
91 | int nv50_cursor_init(struct nouveau_crtc *); | ||
93 | 92 | ||
94 | struct nouveau_connector * | 93 | struct nouveau_connector * |
95 | nouveau_crtc_connector_get(struct nouveau_crtc *crtc); | 94 | nouveau_crtc_connector_get(struct nouveau_crtc *crtc); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index eb514ea29377..ddbabefb4273 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -105,9 +105,12 @@ nouveau_framebuffer_init(struct drm_device *dev, | |||
105 | if (dev_priv->chipset == 0x50) | 105 | if (dev_priv->chipset == 0x50) |
106 | nv_fb->r_format |= (tile_flags << 8); | 106 | nv_fb->r_format |= (tile_flags << 8); |
107 | 107 | ||
108 | if (!tile_flags) | 108 | if (!tile_flags) { |
109 | nv_fb->r_pitch = 0x00100000 | fb->pitch; | 109 | if (dev_priv->card_type < NV_D0) |
110 | else { | 110 | nv_fb->r_pitch = 0x00100000 | fb->pitch; |
111 | else | ||
112 | nv_fb->r_pitch = 0x01000000 | fb->pitch; | ||
113 | } else { | ||
111 | u32 mode = nvbo->tile_mode; | 114 | u32 mode = nvbo->tile_mode; |
112 | if (dev_priv->card_type >= NV_C0) | 115 | if (dev_priv->card_type >= NV_C0) |
113 | mode >>= 4; | 116 | mode >>= 4; |
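For linear (untiled) scanout buffers, nouveau_framebuffer_init() now picks a different pitch flag on NV_D0-and-later chips than on earlier ones: 0x01000000 rather than 0x00100000, OR'd with the byte pitch. A sketch of the selection, with the tiled path elided (card-type constants as used elsewhere in this diff):

#include <stdint.h>
#include <stdio.h>

enum { NV_C0 = 0xc0, NV_D0 = 0xd0 };

static uint32_t linear_r_pitch(int card_type, uint32_t pitch)
{
        /* linear scanout: the "untiled" flag moved on NV_D0 and later */
        return (card_type < NV_D0 ? 0x00100000 : 0x01000000) | pitch;
}

int main(void)
{
        printf("0x%08x\n", linear_r_pitch(NV_D0, 7680));
        return 0;
}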
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 7beb82a0315d..de5efe71fefd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -28,418 +28,619 @@ | |||
28 | #include "nouveau_i2c.h" | 28 | #include "nouveau_i2c.h" |
29 | #include "nouveau_connector.h" | 29 | #include "nouveau_connector.h" |
30 | #include "nouveau_encoder.h" | 30 | #include "nouveau_encoder.h" |
31 | #include "nouveau_crtc.h" | ||
32 | |||
33 | /****************************************************************************** | ||
34 | * aux channel util functions | ||
35 | *****************************************************************************/ | ||
36 | #define AUX_DBG(fmt, args...) do { \ | ||
37 | if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \ | ||
38 | NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \ | ||
39 | } \ | ||
40 | } while (0) | ||
41 | #define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args) | ||
42 | |||
43 | static void | ||
44 | auxch_fini(struct drm_device *dev, int ch) | ||
45 | { | ||
46 | nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000); | ||
47 | } | ||
31 | 48 | ||
32 | static int | 49 | static int |
33 | auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size) | 50 | auxch_init(struct drm_device *dev, int ch) |
34 | { | 51 | { |
35 | struct drm_device *dev = encoder->dev; | 52 | const u32 unksel = 1; /* nfi which to use, or if it matters.. */ |
36 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 53 | const u32 ureq = unksel ? 0x00100000 : 0x00200000; |
37 | struct nouveau_i2c_chan *auxch; | 54 | const u32 urep = unksel ? 0x01000000 : 0x02000000; |
38 | int ret; | 55 | u32 ctrl, timeout; |
39 | 56 | ||
40 | auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | 57 | /* wait up to 1ms for any previous transaction to be done... */ |
41 | if (!auxch) | 58 | timeout = 1000; |
42 | return -ENODEV; | 59 | do { |
43 | 60 | ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); | |
44 | ret = nouveau_dp_auxch(auxch, 9, address, buf, size); | 61 | udelay(1); |
45 | if (ret) | 62 | if (!timeout--) { |
46 | return ret; | 63 | AUX_ERR("begin idle timeout 0x%08x", ctrl); |
64 | return -EBUSY; | ||
65 | } | ||
66 | } while (ctrl & 0x03010000); | ||
67 | |||
68 | /* set some magic, and wait up to 1ms for it to appear */ | ||
69 | nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq); | ||
70 | timeout = 1000; | ||
71 | do { | ||
72 | ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); | ||
73 | udelay(1); | ||
74 | if (!timeout--) { | ||
75 | AUX_ERR("magic wait 0x%08x\n", ctrl); | ||
76 | auxch_fini(dev, ch); | ||
77 | return -EBUSY; | ||
78 | } | ||
79 | } while ((ctrl & 0x03000000) != urep); | ||
47 | 80 | ||
48 | return 0; | 81 | return 0; |
49 | } | 82 | } |
50 | 83 | ||
51 | static int | 84 | static int |
52 | auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size) | 85 | auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size) |
53 | { | 86 | { |
54 | struct drm_device *dev = encoder->dev; | 87 | u32 ctrl, stat, timeout, retries; |
55 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 88 | u32 xbuf[4] = {}; |
56 | struct nouveau_i2c_chan *auxch; | 89 | int ret, i; |
57 | int ret; | ||
58 | 90 | ||
59 | auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | 91 | AUX_DBG("%d: 0x%08x %d\n", type, addr, size); |
60 | if (!auxch) | ||
61 | return -ENODEV; | ||
62 | 92 | ||
63 | ret = nouveau_dp_auxch(auxch, 8, address, buf, size); | 93 | ret = auxch_init(dev, ch); |
64 | return ret; | 94 | if (ret) |
65 | } | 95 | goto out; |
66 | 96 | ||
67 | static int | 97 | stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50)); |
68 | nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd) | 98 | if (!(stat & 0x10000000)) { |
69 | { | 99 | AUX_DBG("sink not detected\n"); |
70 | struct drm_device *dev = encoder->dev; | 100 | ret = -ENXIO; |
71 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 101 | goto out; |
72 | uint32_t tmp; | 102 | } |
73 | int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); | ||
74 | |||
75 | tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); | ||
76 | tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED | | ||
77 | NV50_SOR_DP_CTRL_LANE_MASK); | ||
78 | tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16; | ||
79 | if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN) | ||
80 | tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED; | ||
81 | nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); | ||
82 | |||
83 | return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1); | ||
84 | } | ||
85 | 103 | ||
86 | static int | 104 | if (!(type & 1)) { |
87 | nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd) | 105 | memcpy(xbuf, data, size); |
88 | { | 106 | for (i = 0; i < 16; i += 4) { |
89 | struct drm_device *dev = encoder->dev; | 107 | AUX_DBG("wr 0x%08x\n", xbuf[i / 4]); |
90 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 108 | nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]); |
91 | uint32_t tmp; | 109 | } |
92 | int reg = 0x614300 + (nv_encoder->or * 0x800); | 110 | } |
93 | 111 | ||
94 | tmp = nv_rd32(dev, reg); | 112 | ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); |
95 | tmp &= 0xfff3ffff; | 113 | ctrl &= ~0x0001f0ff; |
96 | if (cmd == DP_LINK_BW_2_7) | 114 | ctrl |= type << 12; |
97 | tmp |= 0x00040000; | 115 | ctrl |= size - 1; |
98 | nv_wr32(dev, reg, tmp); | 116 | nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr); |
117 | |||
118 | /* retry transaction a number of times on failure... */ | ||
119 | ret = -EREMOTEIO; | ||
120 | for (retries = 0; retries < 32; retries++) { | ||
121 | /* reset, and delay a while if this is a retry */ | ||
122 | nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl); | ||
123 | nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl); | ||
124 | if (retries) | ||
125 | udelay(400); | ||
126 | |||
127 | /* transaction request, wait up to 1ms for it to complete */ | ||
128 | nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl); | ||
129 | |||
130 | timeout = 1000; | ||
131 | do { | ||
132 | ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); | ||
133 | udelay(1); | ||
134 | if (!timeout--) { | ||
135 | AUX_ERR("tx req timeout 0x%08x\n", ctrl); | ||
136 | goto out; | ||
137 | } | ||
138 | } while (ctrl & 0x00010000); | ||
99 | 139 | ||
100 | return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1); | 140 | /* read status, and check if transaction completed ok */ |
101 | } | 141 | stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0); |
142 | if (!(stat & 0x000f0f00)) { | ||
143 | ret = 0; | ||
144 | break; | ||
145 | } | ||
102 | 146 | ||
103 | static int | 147 | AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat); |
104 | nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern) | 148 | } |
105 | { | ||
106 | struct drm_device *dev = encoder->dev; | ||
107 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
108 | uint32_t tmp; | ||
109 | uint8_t cmd; | ||
110 | int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); | ||
111 | int ret; | ||
112 | 149 | ||
113 | tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); | 150 | if (type & 1) { |
114 | tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN; | 151 | for (i = 0; i < 16; i += 4) { |
115 | tmp |= (pattern << 24); | 152 | xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i); |
116 | nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); | 153 | AUX_DBG("rd 0x%08x\n", xbuf[i / 4]); |
154 | } | ||
155 | memcpy(data, xbuf, size); | ||
156 | } | ||
117 | 157 | ||
118 | ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1); | 158 | out: |
119 | if (ret) | 159 | auxch_fini(dev, ch); |
120 | return ret; | 160 | return ret; |
121 | cmd &= ~DP_TRAINING_PATTERN_MASK; | ||
122 | cmd |= (pattern & DP_TRAINING_PATTERN_MASK); | ||
123 | return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1); | ||
124 | } | 161 | } |
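
The rewritten auxch_tx() above collapses the old per-direction helpers into a single transaction routine: the caller supplies a request type (bit 0 set for reads), a DPCD address and a buffer, and the function handles channel setup, the 16-byte data FIFO, and up to 32 retries with a 400us back-off whenever the status word reports error or defer bits (0x000f0f00). The standalone sketch below illustrates only that retry contract; fake_stat_read() stands in for real MMIO, so this is a model of the policy, not a usable driver path.

#include <stdint.h>
#include <stdio.h>

/* Mock status source: reports a defer-style error twice, then success. */
static int attempts;
static uint32_t fake_stat_read(void)
{
	return (++attempts < 3) ? 0x00080000 : 0x00000000;
}

/* Skeleton of the auxch_tx() retry policy: kick the transaction, poll
 * for completion, then test the error/defer field of the status word. */
static int aux_submit_with_retry(void)
{
	uint32_t stat;
	int retries;

	for (retries = 0; retries < 32; retries++) {
		/* real code: re-arm ctrl, udelay(400) if retrying, then
		 * poll bit 16 of the ctrl register for up to 1ms */
		stat = fake_stat_read();
		if (!(stat & 0x000f0f00))	/* clean completion */
			return 0;
		printf("retry %d: stat 0x%08x\n", retries, stat);
	}
	return -1;	/* the kernel returns -EREMOTEIO here */
}

int main(void)
{
	return aux_submit_with_retry();
}
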
125 | 162 | ||
126 | static int | 163 | static u32 |
127 | nouveau_dp_max_voltage_swing(struct drm_encoder *encoder) | 164 | dp_link_bw_get(struct drm_device *dev, int or, int link) |
128 | { | 165 | { |
129 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 166 | u32 ctrl = nv_rd32(dev, 0x614300 + (or * 0x800)); |
130 | struct drm_device *dev = encoder->dev; | 167 | if (!(ctrl & 0x000c0000)) |
131 | struct bit_displayport_encoder_table_entry *dpse; | 168 | return 162000; |
132 | struct bit_displayport_encoder_table *dpe; | 169 | return 270000; |
133 | int i, dpe_headerlen, max_vs = 0; | 170 | } |
134 | |||
135 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); | ||
136 | if (!dpe) | ||
137 | return false; | ||
138 | dpse = (void *)((char *)dpe + dpe_headerlen); | ||
139 | 171 | ||
140 | for (i = 0; i < dpe_headerlen; i++, dpse++) { | 172 | static int |
141 | if (dpse->vs_level > max_vs) | 173 | dp_lane_count_get(struct drm_device *dev, int or, int link) |
142 | max_vs = dpse->vs_level; | 174 | { |
175 | u32 ctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); | ||
176 | switch (ctrl & 0x000f0000) { | ||
177 | case 0x00010000: return 1; | ||
178 | case 0x00030000: return 2; | ||
179 | default: | ||
180 | return 4; | ||
143 | } | 181 | } |
144 | |||
145 | return max_vs; | ||
146 | } | 182 | } |
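
dp_link_bw_get() and dp_lane_count_get() recover the current link configuration straight from the SOR registers rather than caching it: the 0x000c0000 field distinguishes the 1.62 and 2.7 Gbps rates, and the 0x000f0000 lane field is a mask with one bit per enabled lane, which is why dp_set_link_config() later programs it as ((1 << link_nr) - 1) << 16. A standalone round-trip of that lane encoding (sample values only, no hardware access):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const int counts[] = { 1, 2, 4 };
	int i;

	for (i = 0; i < 3; i++) {
		/* encode, as dp_set_link_config() does */
		uint32_t field = ((1u << counts[i]) - 1) << 16;

		/* decode, as dp_lane_count_get() does */
		int nr;
		switch (field & 0x000f0000) {
		case 0x00010000: nr = 1; break;
		case 0x00030000: nr = 2; break;
		default:         nr = 4; break;
		}
		printf("%d lanes -> 0x%08x -> %d lanes\n", counts[i], field, nr);
	}
	return 0;
}
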
147 | 183 | ||
148 | static int | 184 | void |
149 | nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs) | 185 | nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp) |
150 | { | 186 | { |
151 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 187 | const u32 symbol = 100000; |
152 | struct drm_device *dev = encoder->dev; | 188 | int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; |
153 | struct bit_displayport_encoder_table_entry *dpse; | 189 | int TU, VTUi, VTUf, VTUa; |
154 | struct bit_displayport_encoder_table *dpe; | 190 | u64 link_data_rate, link_ratio, unk; |
155 | int i, dpe_headerlen, max_pre = 0; | 191 | u32 best_diff = 64 * symbol; |
192 | u32 link_nr, link_bw, r; | ||
193 | |||
194 | /* calculate packed data rate for each lane */ | ||
195 | link_nr = dp_lane_count_get(dev, or, link); | ||
196 | link_data_rate = (clk * bpp / 8) / link_nr; | ||
197 | |||
198 | /* calculate ratio of packed data rate to link symbol rate */ | ||
199 | link_bw = dp_link_bw_get(dev, or, link); | ||
200 | link_ratio = link_data_rate * symbol; | ||
201 | r = do_div(link_ratio, link_bw); | ||
202 | |||
203 | for (TU = 64; TU >= 32; TU--) { | ||
204 | /* calculate average number of valid symbols in each TU */ | ||
205 | u32 tu_valid = link_ratio * TU; | ||
206 | u32 calc, diff; | ||
207 | |||
208 | /* find a hw representation for the fraction.. */ | ||
209 | VTUi = tu_valid / symbol; | ||
210 | calc = VTUi * symbol; | ||
211 | diff = tu_valid - calc; | ||
212 | if (diff) { | ||
213 | if (diff >= (symbol / 2)) { | ||
214 | VTUf = symbol / (symbol - diff); | ||
215 | if (symbol - (VTUf * diff)) | ||
216 | VTUf++; | ||
217 | |||
218 | if (VTUf <= 15) { | ||
219 | VTUa = 1; | ||
220 | calc += symbol - (symbol / VTUf); | ||
221 | } else { | ||
222 | VTUa = 0; | ||
223 | VTUf = 1; | ||
224 | calc += symbol; | ||
225 | } | ||
226 | } else { | ||
227 | VTUa = 0; | ||
228 | VTUf = min((int)(symbol / diff), 15); | ||
229 | calc += symbol / VTUf; | ||
230 | } | ||
156 | 231 | ||
157 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); | 232 | diff = calc - tu_valid; |
158 | if (!dpe) | 233 | } else { |
159 | return false; | 234 | /* no remainder, but the hw doesn't like the fractional |
160 | dpse = (void *)((char *)dpe + dpe_headerlen); | 235 | * part to be zero. decrement the integer part and |
236 | * have the fraction add a whole symbol back | ||
237 | */ | ||
238 | VTUa = 0; | ||
239 | VTUf = 1; | ||
240 | VTUi--; | ||
241 | } | ||
161 | 242 | ||
162 | for (i = 0; i < dpe_headerlen; i++, dpse++) { | 243 | if (diff < best_diff) { |
163 | if (dpse->vs_level != vs) | 244 | best_diff = diff; |
164 | continue; | 245 | bestTU = TU; |
246 | bestVTUa = VTUa; | ||
247 | bestVTUf = VTUf; | ||
248 | bestVTUi = VTUi; | ||
249 | if (diff == 0) | ||
250 | break; | ||
251 | } | ||
252 | } | ||
165 | 253 | ||
166 | if (dpse->pre_level > max_pre) | 254 | if (!bestTU) { |
167 | max_pre = dpse->pre_level; | 255 | NV_ERROR(dev, "DP: unable to find suitable config\n"); |
256 | return; | ||
168 | } | 257 | } |
169 | 258 | ||
170 | return max_pre; | 259 | /* XXX close to vbios numbers, but not right */ |
260 | unk = (symbol - link_ratio) * bestTU; | ||
261 | unk *= link_ratio; | ||
262 | r = do_div(unk, symbol); | ||
263 | r = do_div(unk, symbol); | ||
264 | unk += 6; | ||
265 | |||
266 | nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2); | ||
267 | nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 | | ||
268 | bestVTUf << 16 | | ||
269 | bestVTUi << 8 | | ||
270 | unk); | ||
171 | } | 271 | } |
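
nouveau_dp_tu_update() scales everything by a fixed-point factor of 100,000 (the `symbol` constant): link_ratio is the fraction of each link symbol slot that carries pixel data, and the loop searches transfer-unit sizes 64 down to 32 for the <VTUi, VTUf, VTUa> triple whose value best matches link_ratio * TU. The standalone sketch below reruns that search with plain 64-bit arithmetic for one invented mode; the clock, bpp, lane-count and rate values are illustrative only, and do_div() is replaced by ordinary division.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t symbol = 100000;	/* fixed-point scale, as above */
	uint64_t clk = 154000, bpp = 24, link_nr = 4, link_bw = 270000;
	uint64_t link_data_rate = (clk * bpp / 8) / link_nr;
	uint64_t link_ratio = link_data_rate * symbol / link_bw;
	uint32_t best_diff = 64 * symbol;
	int TU, bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;

	for (TU = 64; TU >= 32; TU--) {
		uint32_t tu_valid = link_ratio * TU;
		uint32_t VTUi = tu_valid / symbol;
		uint32_t calc = VTUi * symbol;
		uint32_t diff = tu_valid - calc;
		uint32_t VTUf = 1, VTUa = 0;

		if (diff) {
			if (diff >= symbol / 2) {
				/* fraction > 1/2: overshoot, then subtract */
				VTUf = symbol / (symbol - diff);
				if (symbol - VTUf * diff)
					VTUf++;
				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - symbol / VTUf;
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				/* fraction <= 1/2: add symbol/VTUf directly */
				VTUf = symbol / diff;
				if (VTUf > 15)
					VTUf = 15;
				calc += symbol / VTUf;
			}
			diff = calc - tu_valid;
		} else {
			/* hw dislikes a zero fraction: borrow a symbol */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU; bestVTUa = VTUa;
			bestVTUf = VTUf; bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}
	printf("TU=%d VTUi=%d VTUf=%d VTUa=%d (diff %u)\n",
	       bestTU, bestVTUi, bestVTUf, bestVTUa, best_diff);
	return 0;
}
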
172 | 272 | ||
173 | static bool | 273 | u8 * |
174 | nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config) | 274 | nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) |
175 | { | 275 | { |
176 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 276 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
177 | struct drm_device *dev = encoder->dev; | 277 | struct nvbios *bios = &dev_priv->vbios; |
178 | struct bit_displayport_encoder_table *dpe; | 278 | struct bit_entry d; |
179 | int ret, i, dpe_headerlen, vs = 0, pre = 0; | 279 | u8 *table; |
180 | uint8_t request[2]; | 280 | int i; |
181 | 281 | ||
182 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); | 282 | if (bit_table(dev, 'd', &d)) { |
183 | if (!dpe) | 283 | NV_ERROR(dev, "BIT 'd' table not found\n"); |
184 | return false; | 284 | return NULL; |
185 | 285 | } | |
186 | ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2); | ||
187 | if (ret) | ||
188 | return false; | ||
189 | |||
190 | NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]); | ||
191 | |||
192 | /* Keep all lanes at the same level.. */ | ||
193 | for (i = 0; i < nv_encoder->dp.link_nr; i++) { | ||
194 | int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf; | ||
195 | int lane_vs = lane_req & 3; | ||
196 | int lane_pre = (lane_req >> 2) & 3; | ||
197 | 286 | ||
198 | if (lane_vs > vs) | 287 | if (d.version != 1) { |
199 | vs = lane_vs; | 288 | NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version); |
200 | if (lane_pre > pre) | 289 | return NULL; |
201 | pre = lane_pre; | ||
202 | } | 290 | } |
203 | 291 | ||
204 | if (vs >= nouveau_dp_max_voltage_swing(encoder)) { | 292 | table = ROMPTR(bios, d.data[0]); |
205 | vs = nouveau_dp_max_voltage_swing(encoder); | 293 | if (!table) { |
206 | vs |= 4; | 294 | NV_ERROR(dev, "displayport table pointer invalid\n"); |
295 | return NULL; | ||
207 | } | 296 | } |
208 | 297 | ||
209 | if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) { | 298 | switch (table[0]) { |
210 | pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3); | 299 | case 0x20: |
211 | pre |= 4; | 300 | case 0x21: |
301 | case 0x30: | ||
302 | break; | ||
303 | default: | ||
304 | NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]); | ||
305 | return NULL; | ||
212 | } | 306 | } |
213 | 307 | ||
214 | /* Update the configuration for all lanes.. */ | 308 | for (i = 0; i < table[3]; i++) { |
215 | for (i = 0; i < nv_encoder->dp.link_nr; i++) | 309 | *entry = ROMPTR(bios, table[table[1] + (i * table[2])]); |
216 | config[i] = (pre << 3) | vs; | 310 | if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0]))) |
311 | return table; | ||
312 | } | ||
217 | 313 | ||
218 | return true; | 314 | NV_ERROR(dev, "displayport encoder table not found\n"); |
315 | return NULL; | ||
219 | } | 316 | } |
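
nouveau_dp_bios_data() navigates a tiny self-describing layout: byte 0 of the table is its version, byte 1 the header length (which doubles as the offset of the first entry), byte 2 the per-entry stride and byte 3 the entry count, and each entry begins with a 16-bit offset into the ROM image. Below is a standalone walk over a fabricated blob using that layout; the byte values are invented purely to exercise the header fields, and real entries would be matched against the DCB encoder as above.

#include <stdint.h>
#include <stdio.h>

static uint16_t rom16(const uint8_t *p)
{
	return p[0] | (uint16_t)p[1] << 8;	/* VBIOS data is little-endian */
}

int main(void)
{
	/* fabricated table: version 0x21, 4-byte header, 2-byte stride, 2 entries */
	static const uint8_t table[] = {
		0x21, 0x04, 0x02, 0x02,
		0x08, 0x00,		/* entry 0: ROM offset 0x0008 */
		0x0c, 0x00,		/* entry 1: ROM offset 0x000c */
	};
	int i;

	for (i = 0; i < table[3]; i++) {
		uint16_t ptr = rom16(&table[table[1] + i * table[2]]);
		printf("entry %d -> ROM offset 0x%04x\n", i, ptr);
	}
	return 0;
}
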
220 | 317 | ||
221 | static bool | 318 | /****************************************************************************** |
222 | nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config) | 319 | * link training |
223 | { | 320 | *****************************************************************************/ |
224 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 321 | struct dp_state { |
225 | struct drm_device *dev = encoder->dev; | 322 | struct dcb_entry *dcb; |
226 | struct bit_displayport_encoder_table_entry *dpse; | 323 | u8 *table; |
227 | struct bit_displayport_encoder_table *dpe; | 324 | u8 *entry; |
228 | int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); | 325 | int auxch; |
229 | int dpe_headerlen, ret, i; | 326 | int crtc; |
327 | int or; | ||
328 | int link; | ||
329 | u8 *dpcd; | ||
330 | int link_nr; | ||
331 | u32 link_bw; | ||
332 | u8 stat[6]; | ||
333 | u8 conf[4]; | ||
334 | }; | ||
230 | 335 | ||
231 | NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n", | 336 | static void |
232 | config[0], config[1], config[2], config[3]); | 337 | dp_set_link_config(struct drm_device *dev, struct dp_state *dp) |
338 | { | ||
339 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
340 | int or = dp->or, link = dp->link; | ||
341 | u8 *entry, sink[2]; | ||
342 | u32 dp_ctrl; | ||
343 | u16 script; | ||
344 | |||
345 | NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); | ||
346 | |||
347 | /* set selected link rate on source */ | ||
348 | switch (dp->link_bw) { | ||
349 | case 270000: | ||
350 | nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00040000); | ||
351 | sink[0] = DP_LINK_BW_2_7; | ||
352 | break; | ||
353 | default: | ||
354 | nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00000000); | ||
355 | sink[0] = DP_LINK_BW_1_62; | ||
356 | break; | ||
357 | } | ||
233 | 358 | ||
234 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); | 359 | /* offset +0x0a of each dp encoder table entry is a pointer to another |
235 | if (!dpe) | 360 | * table, which has (among other things) pointers to more scripts that |
236 | return false; | 361 | * need to be executed, this time depending on link speed. |
237 | dpse = (void *)((char *)dpe + dpe_headerlen); | 362 | */ |
363 | entry = ROMPTR(&dev_priv->vbios, dp->entry[10]); | ||
364 | if (entry) { | ||
365 | if (dp->table[0] < 0x30) { | ||
366 | while (dp->link_bw < (ROM16(entry[0]) * 10)) | ||
367 | entry += 4; | ||
368 | script = ROM16(entry[2]); | ||
369 | } else { | ||
370 | while (dp->link_bw < (entry[0] * 27000)) | ||
371 | entry += 3; | ||
372 | script = ROM16(entry[1]); | ||
373 | } | ||
238 | 374 | ||
239 | for (i = 0; i < dpe->record_nr; i++, dpse++) { | 375 | nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); |
240 | if (dpse->vs_level == (config[0] & 3) && | ||
241 | dpse->pre_level == ((config[0] >> 3) & 3)) | ||
242 | break; | ||
243 | } | 376 | } |
244 | BUG_ON(i == dpe->record_nr); | 377 | |
245 | 378 | /* configure lane count on the source */ | |
246 | for (i = 0; i < nv_encoder->dp.link_nr; i++) { | 379 | dp_ctrl = ((1 << dp->link_nr) - 1) << 16; |
247 | const int shift[4] = { 16, 8, 0, 24 }; | 380 | sink[1] = dp->link_nr; |
248 | uint32_t mask = 0xff << shift[i]; | 381 | if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) { |
249 | uint32_t reg0, reg1, reg2; | 382 | dp_ctrl |= 0x00004000; |
250 | 383 | sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | |
251 | reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask; | ||
252 | reg0 |= (dpse->reg0 << shift[i]); | ||
253 | reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask; | ||
254 | reg1 |= (dpse->reg1 << shift[i]); | ||
255 | reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff; | ||
256 | reg2 |= (dpse->reg2 << 8); | ||
257 | nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0); | ||
258 | nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1); | ||
259 | nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2); | ||
260 | } | 384 | } |
261 | 385 | ||
262 | ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4); | 386 | nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x001f4000, dp_ctrl); |
263 | if (ret) | ||
264 | return false; | ||
265 | 387 | ||
266 | return true; | 388 | /* inform the sink of the new configuration */ |
389 | auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2); | ||
267 | } | 390 | } |
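
Note how the per-rate script search relies on the entries being sorted from fastest to slowest: it advances while the chosen link_bw is still below the entry's rate, so it stops on the first entry the link can satisfy. A standalone model of the pre-0x30 layout, where each 4-byte entry is a 16-bit rate in units of 10 kB/s followed by a 16-bit script pointer (all values fabricated):

#include <stdint.h>
#include <stdio.h>

static uint16_t rom16(const uint8_t *p)
{
	return p[0] | (uint16_t)p[1] << 8;
}

int main(void)
{
	/* fabricated entries, highest rate first: {rate/10, script} */
	static const uint8_t entry_tbl[] = {
		0x78, 0x69, 0x34, 0x12,	/* 270000 -> script 0x1234 */
		0x48, 0x3f, 0x78, 0x56,	/* 162000 -> script 0x5678 */
	};
	const uint8_t *entry = entry_tbl;
	uint32_t link_bw = 162000;

	while (link_bw < rom16(&entry[0]) * 10)
		entry += 4;
	printf("link_bw %u -> run script 0x%04x\n", link_bw, rom16(&entry[2]));
	return 0;
}
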
268 | 391 | ||
269 | bool | 392 | static void |
270 | nouveau_dp_link_train(struct drm_encoder *encoder) | 393 | dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 tp) |
271 | { | 394 | { |
272 | struct drm_device *dev = encoder->dev; | 395 | u8 sink_tp; |
273 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
274 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
275 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
276 | struct nouveau_connector *nv_connector; | ||
277 | struct bit_displayport_encoder_table *dpe; | ||
278 | int dpe_headerlen; | ||
279 | uint8_t config[4], status[3]; | ||
280 | bool cr_done, cr_max_vs, eq_done, hpd_state; | ||
281 | int ret = 0, i, tries, voltage; | ||
282 | 396 | ||
283 | NV_DEBUG_KMS(dev, "link training!!\n"); | 397 | NV_DEBUG_KMS(dev, "training pattern %d\n", tp); |
284 | 398 | ||
285 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | 399 | nv_mask(dev, NV50_SOR_DP_CTRL(dp->or, dp->link), 0x0f000000, tp << 24); |
286 | if (!nv_connector) | ||
287 | return false; | ||
288 | 400 | ||
289 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); | 401 | auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1); |
290 | if (!dpe) { | 402 | sink_tp &= ~DP_TRAINING_PATTERN_MASK; |
291 | NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); | 403 | sink_tp |= tp; |
292 | return false; | 404 | auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1); |
293 | } | 405 | } |
294 | 406 | ||
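
dp_set_training_pattern() deliberately does a read-modify-write on the sink's TRAINING_PATTERN_SET register (request type 9 is an AUX read, type 8 a write): only the two pattern bits change, so neighbouring controls in the same byte, such as the scrambling-disable bit, survive. A minimal sketch of that merge (the 0x20 read-back value is a made-up sample):

#include <stdint.h>
#include <stdio.h>

#define DP_TRAINING_PATTERN_MASK 0x03	/* low two bits select the pattern */

int main(void)
{
	uint8_t sink_tp = 0x20;	/* sample read-back: scrambling bit set, pattern 0 */
	uint8_t tp = 2;		/* switch the sink to training pattern 2 */

	sink_tp &= ~DP_TRAINING_PATTERN_MASK;
	sink_tp |= tp;
	printf("write back 0x%02x\n", sink_tp);	/* 0x22: bit 5 preserved */
	return 0;
}
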
295 | /* disable hotplug detect, this flips around on some panels during | 407 | static const u8 nv50_lane_map[] = { 16, 8, 0, 24 }; |
296 | * link training. | 408 | static const u8 nvaf_lane_map[] = { 24, 16, 8, 0 }; |
297 | */ | 409 | |
298 | hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); | 410 | static int |
411 | dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) | ||
412 | { | ||
413 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
414 | u32 mask = 0, drv = 0, pre = 0, unk = 0; | ||
415 | const u8 *shifts; | ||
416 | int link = dp->link; | ||
417 | int or = dp->or; | ||
418 | int i; | ||
419 | |||
420 | if (dev_priv->chipset != 0xaf) | ||
421 | shifts = nv50_lane_map; | ||
422 | else | ||
423 | shifts = nvaf_lane_map; | ||
424 | |||
425 | for (i = 0; i < dp->link_nr; i++) { | ||
426 | u8 *conf = dp->entry + dp->table[4]; | ||
427 | u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; | ||
428 | u8 lpre = (lane & 0x0c) >> 2; | ||
429 | u8 lvsw = (lane & 0x03) >> 0; | ||
430 | |||
431 | mask |= 0xff << shifts[i]; | ||
432 | unk |= 1 << (shifts[i] >> 3); | ||
433 | |||
434 | dp->conf[i] = (lpre << 3) | lvsw; | ||
435 | if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200) | ||
436 | dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED; | ||
437 | if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5) | ||
438 | dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | ||
439 | |||
440 | NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]); | ||
441 | |||
442 | if (dp->table[0] < 0x30) { | ||
443 | u8 *last = conf + (dp->entry[4] * dp->table[5]); | ||
444 | while (lvsw != conf[0] || lpre != conf[1]) { | ||
445 | conf += dp->table[5]; | ||
446 | if (conf >= last) | ||
447 | return -EINVAL; | ||
448 | } | ||
449 | |||
450 | conf += 2; | ||
451 | } else { | ||
452 | /* no lookup table anymore, set entries for each | ||
453 | * combination of voltage swing and pre-emphasis | ||
454 | * level allowed by the DP spec. | ||
455 | */ | ||
456 | switch (lvsw) { | ||
457 | case 0: lpre += 0; break; | ||
458 | case 1: lpre += 4; break; | ||
459 | case 2: lpre += 7; break; | ||
460 | case 3: lpre += 9; break; | ||
461 | } | ||
462 | |||
463 | conf = conf + (lpre * dp->table[5]); | ||
464 | conf++; | ||
465 | } | ||
299 | 466 | ||
300 | if (dpe->script0) { | 467 | drv |= conf[0] << shifts[i]; |
301 | NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); | 468 | pre |= conf[1] << shifts[i]; |
302 | nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), | 469 | unk = (unk & ~0x0000ff00) | (conf[2] << 8); |
303 | nv_encoder->dcb); | ||
304 | } | 470 | } |
305 | 471 | ||
306 | train: | 472 | nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, drv); |
307 | cr_done = eq_done = false; | 473 | nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, pre); |
474 | nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff0f, unk); | ||
308 | 475 | ||
309 | /* set link configuration */ | 476 | return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4); |
310 | NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n", | 477 | } |
311 | nv_encoder->dp.link_bw, nv_encoder->dp.link_nr); | ||
312 | 478 | ||
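
dp_link_train_commit() above reads the sink's adjust-request bytes out of dp->stat[4..5], where two lanes share each byte: the low two bits of a lane's nibble are the requested voltage swing, the next two the requested pre-emphasis. The drive values for all lanes are then packed into one register word using a per-chipset shift table (16/8/0/24 before NVAF, 24/16/8/0 on NVAF). The standalone sketch below unpacks one fabricated request and rebuilds both the TRAINING_LANEx_SET bytes and the packed source word; the real drive bytes come from the vbios table, so conf itself is reused here as a stand-in.

#include <stdint.h>
#include <stdio.h>

#define DP_TRAIN_MAX_SWING_REACHED        0x04
#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED 0x20

int main(void)
{
	static const uint8_t shifts[] = { 16, 8, 0, 24 };	/* pre-NVAF order */
	uint8_t adjust[2] = { 0x21, 0xf3 };	/* fabricated sink request */
	uint32_t drv = 0;
	int i;

	for (i = 0; i < 4; i++) {
		uint8_t lane = (adjust[i >> 1] >> ((i & 1) * 4)) & 0xf;
		uint8_t lvsw = lane & 0x03;		/* voltage swing level */
		uint8_t lpre = (lane & 0x0c) >> 2;	/* pre-emphasis level  */
		uint8_t conf = (lpre << 3) | lvsw;

		if (lvsw == 3)	/* level 3 = 1200mV, the maximum */
			conf |= DP_TRAIN_MAX_SWING_REACHED;
		if (lpre == 3)	/* level 3 = 9.5dB, the maximum  */
			conf |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

		/* a vbios-derived drive byte would go here; use conf itself */
		drv |= (uint32_t)conf << shifts[i];
		printf("lane %d: nibble 0x%x -> conf 0x%02x\n", i, lane, conf);
	}
	printf("packed drive word 0x%08x\n", drv);
	return 0;
}
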
313 | ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw); | 479 | static int |
314 | if (ret) | 480 | dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay) |
315 | return false; | 481 | { |
482 | int ret; | ||
316 | 483 | ||
317 | config[0] = nv_encoder->dp.link_nr; | 484 | udelay(delay); |
318 | if (nv_encoder->dp.dpcd_version >= 0x11 && | ||
319 | nv_encoder->dp.enhanced_frame) | ||
320 | config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
321 | 485 | ||
322 | ret = nouveau_dp_lane_count_set(encoder, config[0]); | 486 | ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6); |
323 | if (ret) | 487 | if (ret) |
324 | return false; | 488 | return ret; |
325 | 489 | ||
326 | /* clock recovery */ | 490 | NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n", |
327 | NV_DEBUG_KMS(dev, "\tbegin cr\n"); | 491 | dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3], |
328 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1); | 492 | dp->stat[4], dp->stat[5]); |
329 | if (ret) | 493 | return 0; |
330 | goto stop; | 494 | } |
331 | 495 | ||
332 | tries = 0; | 496 | static int |
333 | voltage = -1; | 497 | dp_link_train_cr(struct drm_device *dev, struct dp_state *dp) |
334 | memset(config, 0x00, sizeof(config)); | 498 | { |
335 | for (;;) { | 499 | bool cr_done = false, abort = false; |
336 | if (!nouveau_dp_link_train_commit(encoder, config)) | 500 | int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
337 | break; | 501 | int tries = 0, i; |
338 | 502 | ||
339 | udelay(100); | 503 | dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1); |
340 | 504 | ||
341 | ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2); | 505 | do { |
342 | if (ret) | 506 | if (dp_link_train_commit(dev, dp) || |
507 | dp_link_train_update(dev, dp, 100)) | ||
343 | break; | 508 | break; |
344 | NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n", | ||
345 | status[0], status[1]); | ||
346 | 509 | ||
347 | cr_done = true; | 510 | cr_done = true; |
348 | cr_max_vs = false; | 511 | for (i = 0; i < dp->link_nr; i++) { |
349 | for (i = 0; i < nv_encoder->dp.link_nr; i++) { | 512 | u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; |
350 | int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf; | ||
351 | |||
352 | if (!(lane & DP_LANE_CR_DONE)) { | 513 | if (!(lane & DP_LANE_CR_DONE)) { |
353 | cr_done = false; | 514 | cr_done = false; |
354 | if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED) | 515 | if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED) |
355 | cr_max_vs = true; | 516 | abort = true; |
356 | break; | 517 | break; |
357 | } | 518 | } |
358 | } | 519 | } |
359 | 520 | ||
360 | if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { | 521 | if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { |
361 | voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 522 | voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
362 | tries = 0; | 523 | tries = 0; |
363 | } | 524 | } |
525 | } while (!cr_done && !abort && ++tries < 5); | ||
364 | 526 | ||
365 | if (cr_done || cr_max_vs || (++tries == 5)) | 527 | return cr_done ? 0 : -1; |
366 | break; | 528 | } |
367 | |||
368 | if (!nouveau_dp_link_train_adjust(encoder, config)) | ||
369 | break; | ||
370 | } | ||
371 | |||
372 | if (!cr_done) | ||
373 | goto stop; | ||
374 | 529 | ||
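
The clock-recovery loop in dp_link_train_cr() above encodes three exit conditions: success (every active lane reports CR_DONE in its status nibble), a hard abort once a lane still fails after the maximum swing has been flagged, and a budget of five attempts per voltage level, reset whenever the requested swing changes. A toy model of just that control flow, with a fake sink that locks once the swing reaches level 2 (no AUX traffic involved):

#include <stdbool.h>
#include <stdio.h>

/* fake sink: achieves clock recovery once driven at swing level 2 */
static bool sink_cr_done(int vswing) { return vswing >= 2; }

int main(void)
{
	int vswing = 0, voltage = -1, tries = 0;
	bool cr_done = false, abort = false;

	do {
		cr_done = sink_cr_done(vswing);
		if (!cr_done) {
			if (vswing == 3)	/* max swing already reached */
				abort = true;
			else
				vswing++;	/* sink requests one level more */
		}
		if (vswing != voltage) {	/* new level: reset the budget */
			voltage = vswing;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	printf("%s at swing level %d\n", cr_done ? "CR done" : "failed", vswing);
	return cr_done ? 0 : 1;
}
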
375 | /* channel equalisation */ | 530 | static int |
376 | NV_DEBUG_KMS(dev, "\tbegin eq\n"); | 531 | dp_link_train_eq(struct drm_device *dev, struct dp_state *dp) |
377 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2); | 532 | { |
378 | if (ret) | 533 | bool eq_done, cr_done = true; |
379 | goto stop; | 534 | int tries = 0, i; |
380 | 535 | ||
381 | for (tries = 0; tries <= 5; tries++) { | 536 | dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2); |
382 | udelay(400); | ||
383 | 537 | ||
384 | ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3); | 538 | do { |
385 | if (ret) | 539 | if (dp_link_train_update(dev, dp, 400)) |
386 | break; | 540 | break; |
387 | NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n", | ||
388 | status[0], status[1]); | ||
389 | 541 | ||
390 | eq_done = true; | 542 | eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE); |
391 | if (!(status[2] & DP_INTERLANE_ALIGN_DONE)) | 543 | for (i = 0; i < dp->link_nr && eq_done; i++) { |
392 | eq_done = false; | 544 | u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; |
393 | 545 | if (!(lane & DP_LANE_CR_DONE)) | |
394 | for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) { | ||
395 | int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf; | ||
396 | |||
397 | if (!(lane & DP_LANE_CR_DONE)) { | ||
398 | cr_done = false; | 546 | cr_done = false; |
399 | break; | ||
400 | } | ||
401 | |||
402 | if (!(lane & DP_LANE_CHANNEL_EQ_DONE) || | 547 | if (!(lane & DP_LANE_CHANNEL_EQ_DONE) || |
403 | !(lane & DP_LANE_SYMBOL_LOCKED)) { | 548 | !(lane & DP_LANE_SYMBOL_LOCKED)) |
404 | eq_done = false; | 549 | eq_done = false; |
405 | break; | ||
406 | } | ||
407 | } | 550 | } |
408 | 551 | ||
409 | if (eq_done || !cr_done) | 552 | if (dp_link_train_commit(dev, dp)) |
410 | break; | 553 | break; |
554 | } while (!eq_done && cr_done && ++tries <= 5); | ||
411 | 555 | ||
412 | if (!nouveau_dp_link_train_adjust(encoder, config) || | 556 | return eq_done ? 0 : -1; |
413 | !nouveau_dp_link_train_commit(encoder, config)) | 557 | } |
414 | break; | ||
415 | } | ||
416 | 558 | ||
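
Channel equalisation succeeds only if the interlane-align bit in the third status byte is set and every lane still shows CR_DONE alongside CHANNEL_EQ_DONE and SYMBOL_LOCKED; losing clock recovery mid-EQ ends the loop early so training can restart at a different configuration. A standalone check over one fabricated 6-byte status read (DP_* bit positions per the DPCD lane-status layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_LANE_CR_DONE          (1 << 0)
#define DP_LANE_CHANNEL_EQ_DONE  (1 << 1)
#define DP_LANE_SYMBOL_LOCKED    (1 << 2)
#define DP_INTERLANE_ALIGN_DONE  (1 << 0)

int main(void)
{
	/* fabricated status: lanes 0-3 fully trained, lanes aligned */
	uint8_t stat[6] = { 0x77, 0x77, 0x01, 0x00, 0x00, 0x00 };
	bool eq_done = !!(stat[2] & DP_INTERLANE_ALIGN_DONE);
	bool cr_done = true;
	int link_nr = 4, i;

	for (i = 0; i < link_nr && eq_done; i++) {
		uint8_t lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
		if (!(lane & DP_LANE_CR_DONE))
			cr_done = false;	/* CR lost: restart training */
		if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
		    !(lane & DP_LANE_SYMBOL_LOCKED))
			eq_done = false;
	}
	printf("eq_done=%d cr_done=%d\n", eq_done, cr_done);
	return 0;
}
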
417 | stop: | 559 | bool |
418 | /* end link training */ | 560 | nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate) |
419 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE); | 561 | { |
420 | if (ret) | 562 | struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; |
563 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
564 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
565 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | ||
566 | struct nouveau_connector *nv_connector = | ||
567 | nouveau_encoder_connector_get(nv_encoder); | ||
568 | struct drm_device *dev = encoder->dev; | ||
569 | struct nouveau_i2c_chan *auxch; | ||
570 | const u32 bw_list[] = { 270000, 162000, 0 }; | ||
571 | const u32 *link_bw = bw_list; | ||
572 | struct dp_state dp; | ||
573 | |||
574 | auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | ||
575 | if (!auxch) | ||
421 | return false; | 576 | return false; |
422 | 577 | ||
423 | /* retry at a lower setting, if possible */ | 578 | dp.table = nouveau_dp_bios_data(dev, nv_encoder->dcb, &dp.entry); |
424 | if (!ret && !(eq_done && cr_done)) { | 579 | if (!dp.table) |
425 | NV_DEBUG_KMS(dev, "\twe failed\n"); | 580 | return false; |
426 | if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) { | 581 | |
427 | NV_DEBUG_KMS(dev, "retry link training at low rate\n"); | 582 | dp.dcb = nv_encoder->dcb; |
428 | nv_encoder->dp.link_bw = DP_LINK_BW_1_62; | 583 | dp.crtc = nv_crtc->index; |
429 | goto train; | 584 | dp.auxch = auxch->rd; |
430 | } | 585 | dp.or = nv_encoder->or; |
586 | dp.link = !(nv_encoder->dcb->sorconf.link & 1); | ||
587 | dp.dpcd = nv_encoder->dp.dpcd; | ||
588 | |||
589 | /* some sinks toggle hotplug in response to some of the actions | ||
590 | * we take during link training (DP_SET_POWER is one), so we need | ||
591 | * to ignore them for the moment to avoid races. | ||
592 | */ | ||
593 | pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); | ||
594 | |||
595 | /* enable down-spreading, if possible */ | ||
596 | if (dp.table[1] >= 16) { | ||
597 | u16 script = ROM16(dp.entry[14]); | ||
598 | if (nv_encoder->dp.dpcd[3] & 1) | ||
599 | script = ROM16(dp.entry[12]); | ||
600 | |||
601 | nouveau_bios_run_init_table(dev, script, dp.dcb, dp.crtc); | ||
431 | } | 602 | } |
432 | 603 | ||
433 | if (dpe->script1) { | 604 | /* execute pre-train script from vbios */ |
434 | NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); | 605 | nouveau_bios_run_init_table(dev, ROM16(dp.entry[6]), dp.dcb, dp.crtc); |
435 | nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), | 606 | |
436 | nv_encoder->dcb); | 607 | /* start off at highest link rate supported by encoder and display */ |
608 | while (*link_bw > nv_encoder->dp.link_bw) | ||
609 | link_bw++; | ||
610 | |||
611 | while (link_bw[0]) { | ||
612 | /* find minimum required lane count at this link rate */ | ||
613 | dp.link_nr = nv_encoder->dp.link_nr; | ||
614 | while ((dp.link_nr >> 1) * link_bw[0] > datarate) | ||
615 | dp.link_nr >>= 1; | ||
616 | |||
617 | /* drop link rate to minimum with this lane count */ | ||
618 | while ((link_bw[1] * dp.link_nr) > datarate) | ||
619 | link_bw++; | ||
620 | dp.link_bw = link_bw[0]; | ||
621 | |||
622 | /* program selected link configuration */ | ||
623 | dp_set_link_config(dev, &dp); | ||
624 | |||
625 | /* attempt to train the link at this configuration */ | ||
626 | memset(dp.stat, 0x00, sizeof(dp.stat)); | ||
627 | if (!dp_link_train_cr(dev, &dp) && | ||
628 | !dp_link_train_eq(dev, &dp)) | ||
629 | break; | ||
630 | |||
631 | /* retry at lower rate */ | ||
632 | link_bw++; | ||
437 | } | 633 | } |
438 | 634 | ||
439 | /* re-enable hotplug detect */ | 635 | /* finish link training */ |
440 | pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state); | 636 | dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE); |
441 | 637 | ||
442 | return eq_done; | 638 | /* execute post-train script from vbios */ |
639 | nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc); | ||
640 | |||
641 | /* re-enable hotplug detect */ | ||
642 | pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); | ||
643 | return true; | ||
443 | } | 644 | } |
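
The configuration loop at the heart of nouveau_dp_link_train() is worth tracing once by hand: for each candidate rate it first halves the lane count while half the lanes still carry the stream, then slides down the rate list while the next lower rate still fits, and only steps to a lower entry outright when training fails. A standalone walk that prints the configurations tried for one hypothetical stream (datarate in the same kB/s units as link_bw, with every attempt assumed to fail so the whole ladder is shown):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint32_t bw_list[] = { 270000, 162000, 0 };
	const uint32_t *link_bw = bw_list;
	uint32_t datarate = 200000;	/* hypothetical stream bandwidth */
	int max_nr = 4, link_nr;

	while (link_bw[0]) {
		/* minimum lane count that still carries the stream */
		link_nr = max_nr;
		while ((link_nr >> 1) * link_bw[0] > datarate)
			link_nr >>= 1;

		/* lowest rate that still fits with that many lanes */
		while (link_bw[1] * link_nr > datarate)
			link_bw++;

		printf("try %d lane(s) at %u\n", link_nr, link_bw[0]);
		link_bw++;	/* pretend training failed; fall back */
	}
	return 0;
}
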
444 | 645 | ||
445 | bool | 646 | bool |
@@ -447,31 +648,34 @@ nouveau_dp_detect(struct drm_encoder *encoder) | |||
447 | { | 648 | { |
448 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 649 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
449 | struct drm_device *dev = encoder->dev; | 650 | struct drm_device *dev = encoder->dev; |
450 | uint8_t dpcd[4]; | 651 | struct nouveau_i2c_chan *auxch; |
652 | u8 *dpcd = nv_encoder->dp.dpcd; | ||
451 | int ret; | 653 | int ret; |
452 | 654 | ||
453 | ret = auxch_rd(encoder, 0x0000, dpcd, 4); | 655 | auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); |
454 | if (ret) | 656 | if (!auxch) |
455 | return false; | 657 | return false; |
456 | 658 | ||
457 | NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n" | 659 | ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8); |
458 | "display: link_bw %d, link_nr %d version 0x%02x\n", | 660 | if (ret) |
459 | nv_encoder->dcb->dpconf.link_bw, | 661 | return false; |
460 | nv_encoder->dcb->dpconf.link_nr, | ||
461 | dpcd[1], dpcd[2] & 0x0f, dpcd[0]); | ||
462 | 662 | ||
463 | nv_encoder->dp.dpcd_version = dpcd[0]; | 663 | nv_encoder->dp.link_bw = 27000 * dpcd[1]; |
664 | nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; | ||
464 | 665 | ||
465 | nv_encoder->dp.link_bw = dpcd[1]; | 666 | NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n", |
466 | if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 && | 667 | nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]); |
467 | !nv_encoder->dcb->dpconf.link_bw) | 668 | NV_DEBUG_KMS(dev, "encoder: %dx%d\n", |
468 | nv_encoder->dp.link_bw = DP_LINK_BW_1_62; | 669 | nv_encoder->dcb->dpconf.link_nr, |
670 | nv_encoder->dcb->dpconf.link_bw); | ||
469 | 671 | ||
470 | nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; | 672 | if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr) |
471 | if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr) | ||
472 | nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; | 673 | nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; |
674 | if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw) | ||
675 | nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw; | ||
473 | 676 | ||
474 | nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP); | 677 | NV_DEBUG_KMS(dev, "maximum: %dx%d\n", |
678 | nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); | ||
475 | 679 | ||
476 | return true; | 680 | return true; |
477 | } | 681 | } |
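
nouveau_dp_detect() now keeps the raw 8-byte DPCD block in the encoder and derives the training limits from it: DPCD byte 1 is the maximum link rate in units of 0.27 GB/s (hence the 27000 multiplier, giving kB/s), byte 2's low bits are the maximum lane count, and both are clamped against what the board's DCB entry says the encoder can drive. A minimal clamp with fabricated sink and board values:

#include <stdint.h>
#include <stdio.h>

#define DP_MAX_LANE_COUNT_MASK 0x1f

int main(void)
{
	/* fabricated DPCD: rev 1.1, 2.7 GB/s max, 4 lanes + enhanced framing */
	uint8_t dpcd[8] = { 0x11, 0x0a, 0x84, 0, 0, 0, 0, 0 };
	uint32_t sink_bw = 27000 * dpcd[1];		/* 0x0a -> 270000 */
	int sink_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;

	uint32_t enc_bw = 270000;	/* hypothetical DCB dpconf limits */
	int enc_nr = 2;

	if (enc_nr < sink_nr)
		sink_nr = enc_nr;
	if (enc_bw < sink_bw)
		sink_bw = enc_bw;
	printf("maximum: %d lane(s) at %u\n", sink_nr, sink_bw);
	return 0;
}
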
@@ -480,105 +684,13 @@ int | |||
480 | nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | 684 | nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, |
481 | uint8_t *data, int data_nr) | 685 | uint8_t *data, int data_nr) |
482 | { | 686 | { |
483 | struct drm_device *dev = auxch->dev; | 687 | return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr); |
484 | uint32_t tmp, ctrl, stat = 0, data32[4] = {}; | ||
485 | int ret = 0, i, index = auxch->rd; | ||
486 | |||
487 | NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr); | ||
488 | |||
489 | tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); | ||
490 | nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000); | ||
491 | tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); | ||
492 | if (!(tmp & 0x01000000)) { | ||
493 | NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp); | ||
494 | ret = -EIO; | ||
495 | goto out; | ||
496 | } | ||
497 | |||
498 | for (i = 0; i < 3; i++) { | ||
499 | tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd)); | ||
500 | if (tmp & NV50_AUXCH_STAT_STATE_READY) | ||
501 | break; | ||
502 | udelay(100); | ||
503 | } | ||
504 | |||
505 | if (i == 3) { | ||
506 | ret = -EBUSY; | ||
507 | goto out; | ||
508 | } | ||
509 | |||
510 | if (!(cmd & 1)) { | ||
511 | memcpy(data32, data, data_nr); | ||
512 | for (i = 0; i < 4; i++) { | ||
513 | NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]); | ||
514 | nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]); | ||
515 | } | ||
516 | } | ||
517 | |||
518 | nv_wr32(dev, NV50_AUXCH_ADDR(index), addr); | ||
519 | ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index)); | ||
520 | ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN); | ||
521 | ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); | ||
522 | ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); | ||
523 | |||
524 | for (i = 0; i < 16; i++) { | ||
525 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); | ||
526 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); | ||
527 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); | ||
528 | if (!nv_wait(dev, NV50_AUXCH_CTRL(index), | ||
529 | 0x00010000, 0x00000000)) { | ||
530 | NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", | ||
531 | nv_rd32(dev, NV50_AUXCH_CTRL(index))); | ||
532 | ret = -EBUSY; | ||
533 | goto out; | ||
534 | } | ||
535 | |||
536 | udelay(400); | ||
537 | |||
538 | stat = nv_rd32(dev, NV50_AUXCH_STAT(index)); | ||
539 | if ((stat & NV50_AUXCH_STAT_REPLY_AUX) != | ||
540 | NV50_AUXCH_STAT_REPLY_AUX_DEFER) | ||
541 | break; | ||
542 | } | ||
543 | |||
544 | if (i == 16) { | ||
545 | NV_ERROR(dev, "auxch DEFER too many times, bailing\n"); | ||
546 | ret = -EREMOTEIO; | ||
547 | goto out; | ||
548 | } | ||
549 | |||
550 | if (cmd & 1) { | ||
551 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { | ||
552 | ret = -EREMOTEIO; | ||
553 | goto out; | ||
554 | } | ||
555 | |||
556 | for (i = 0; i < 4; i++) { | ||
557 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); | ||
558 | NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); | ||
559 | } | ||
560 | memcpy(data, data32, data_nr); | ||
561 | } | ||
562 | |||
563 | out: | ||
564 | tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); | ||
565 | nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000); | ||
566 | tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); | ||
567 | if (tmp & 0x01000000) { | ||
568 | NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp); | ||
569 | ret = -EIO; | ||
570 | } | ||
571 | |||
572 | udelay(400); | ||
573 | |||
574 | return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY); | ||
575 | } | 688 | } |
576 | 689 | ||
577 | static int | 690 | static int |
578 | nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | 691 | nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) |
579 | { | 692 | { |
580 | struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap; | 693 | struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap; |
581 | struct drm_device *dev = auxch->dev; | ||
582 | struct i2c_msg *msg = msgs; | 694 | struct i2c_msg *msg = msgs; |
583 | int ret, mcnt = num; | 695 | int ret, mcnt = num; |
584 | 696 | ||
@@ -602,19 +714,6 @@ nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | |||
602 | if (ret < 0) | 714 | if (ret < 0) |
603 | return ret; | 715 | return ret; |
604 | 716 | ||
605 | switch (ret & NV50_AUXCH_STAT_REPLY_I2C) { | ||
606 | case NV50_AUXCH_STAT_REPLY_I2C_ACK: | ||
607 | break; | ||
608 | case NV50_AUXCH_STAT_REPLY_I2C_NACK: | ||
609 | return -EREMOTEIO; | ||
610 | case NV50_AUXCH_STAT_REPLY_I2C_DEFER: | ||
611 | udelay(100); | ||
612 | continue; | ||
613 | default: | ||
614 | NV_ERROR(dev, "bad auxch reply: 0x%08x\n", ret); | ||
615 | return -EREMOTEIO; | ||
616 | } | ||
617 | |||
618 | ptr += cnt; | 717 | ptr += cnt; |
619 | remaining -= cnt; | 718 | remaining -= cnt; |
620 | } | 719 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index b30ddd8d2e2a..c1e01f37b9d1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -41,7 +41,7 @@ int nouveau_agpmode = -1; | |||
41 | module_param_named(agpmode, nouveau_agpmode, int, 0400); | 41 | module_param_named(agpmode, nouveau_agpmode, int, 0400); |
42 | 42 | ||
43 | MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); | 43 | MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); |
44 | static int nouveau_modeset = -1; /* kms */ | 44 | int nouveau_modeset = -1; |
45 | module_param_named(modeset, nouveau_modeset, int, 0400); | 45 | module_param_named(modeset, nouveau_modeset, int, 0400); |
46 | 46 | ||
47 | MODULE_PARM_DESC(vbios, "Override default VBIOS location"); | 47 | MODULE_PARM_DESC(vbios, "Override default VBIOS location"); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d7d51deb34b6..29837da1098b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -414,12 +414,13 @@ struct nouveau_gpio_engine { | |||
414 | }; | 414 | }; |
415 | 415 | ||
416 | struct nouveau_pm_voltage_level { | 416 | struct nouveau_pm_voltage_level { |
417 | u8 voltage; | 417 | u32 voltage; /* microvolts */ |
418 | u8 vid; | 418 | u8 vid; |
419 | }; | 419 | }; |
420 | 420 | ||
421 | struct nouveau_pm_voltage { | 421 | struct nouveau_pm_voltage { |
422 | bool supported; | 422 | bool supported; |
423 | u8 version; | ||
423 | u8 vid_mask; | 424 | u8 vid_mask; |
424 | 425 | ||
425 | struct nouveau_pm_voltage_level *level; | 426 | struct nouveau_pm_voltage_level *level; |
@@ -428,17 +429,48 @@ struct nouveau_pm_voltage { | |||
428 | 429 | ||
429 | struct nouveau_pm_memtiming { | 430 | struct nouveau_pm_memtiming { |
430 | int id; | 431 | int id; |
431 | u32 reg_100220; | 432 | u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */ |
432 | u32 reg_100224; | 433 | u32 reg_1; |
433 | u32 reg_100228; | 434 | u32 reg_2; |
434 | u32 reg_10022c; | 435 | u32 reg_3; |
435 | u32 reg_100230; | 436 | u32 reg_4; |
436 | u32 reg_100234; | 437 | u32 reg_5; |
437 | u32 reg_100238; | 438 | u32 reg_6; |
438 | u32 reg_10023c; | 439 | u32 reg_7; |
439 | u32 reg_100240; | 440 | u32 reg_8; |
441 | /* To be written to 0x1002c0 */ | ||
442 | u8 CL; | ||
443 | u8 WR; | ||
440 | }; | 444 | }; |
441 | 445 | ||
446 | struct nouveau_pm_tbl_header { | ||
447 | u8 version; | ||
448 | u8 header_len; | ||
449 | u8 entry_cnt; | ||
450 | u8 entry_len; | ||
451 | }; | ||
452 | |||
453 | struct nouveau_pm_tbl_entry { | ||
454 | u8 tWR; | ||
455 | u8 tUNK_1; | ||
456 | u8 tCL; | ||
457 | u8 tRP; /* Byte 3 */ | ||
458 | u8 empty_4; | ||
459 | u8 tRAS; /* Byte 5 */ | ||
460 | u8 empty_6; | ||
461 | u8 tRFC; /* Byte 7 */ | ||
462 | u8 empty_8; | ||
463 | u8 tRC; /* Byte 9 */ | ||
464 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; | ||
465 | u8 empty_15, empty_16, empty_17; | ||
466 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; | ||
467 | }; | ||
468 | |||
469 | /* nouveau_mem.c */ | ||
470 | void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | ||
471 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | ||
472 | struct nouveau_pm_memtiming *timing); | ||
473 | |||
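
The new nouveau_pm_tbl_header/nouveau_pm_tbl_entry structs mirror the raw VBIOS memory-timing table that nv30_mem_timing_entry() decodes: a 4-byte header (version, header length, entry count, entry length) followed by fixed-size records whose byte positions match the struct fields above. A standalone walk over a fabricated blob using that header; the values are invented, and a real parser would also bounds-check entry_len against the struct size.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tbl_header {
	uint8_t version, header_len, entry_cnt, entry_len;
};

int main(void)
{
	/* fabricated table: version 0x10, 4-byte header, 2 entries, 22 bytes each */
	uint8_t blob[4 + 2 * 22] = { 0x10, 0x04, 0x02, 0x16 };
	struct tbl_header hdr;
	int i;

	blob[4 + 0] = 12;	/* entry 0: tWR (byte 0) */
	blob[4 + 2] = 9;	/*          tCL (byte 2) */
	memcpy(&hdr, blob, sizeof(hdr));

	for (i = 0; i < hdr.entry_cnt; i++) {
		const uint8_t *e = blob + hdr.header_len + i * hdr.entry_len;
		printf("entry %d: tWR=%u tCL=%u tRP=%u\n", i, e[0], e[2], e[3]);
	}
	return 0;
}
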
442 | #define NOUVEAU_PM_MAX_LEVEL 8 | 474 | #define NOUVEAU_PM_MAX_LEVEL 8 |
443 | struct nouveau_pm_level { | 475 | struct nouveau_pm_level { |
444 | struct device_attribute dev_attr; | 476 | struct device_attribute dev_attr; |
@@ -448,11 +480,19 @@ struct nouveau_pm_level { | |||
448 | u32 core; | 480 | u32 core; |
449 | u32 memory; | 481 | u32 memory; |
450 | u32 shader; | 482 | u32 shader; |
451 | u32 unk05; | 483 | u32 rop; |
452 | u32 unk0a; | 484 | u32 copy; |
453 | 485 | u32 daemon; | |
454 | u8 voltage; | 486 | u32 vdec; |
455 | u8 fanspeed; | 487 | u32 unk05; /* nv50:nva3, roughly.. */ |
488 | u32 unka0; /* nva3:nvc0 */ | ||
489 | u32 hub01; /* nvc0- */ | ||
490 | u32 hub06; /* nvc0- */ | ||
491 | u32 hub07; /* nvc0- */ | ||
492 | |||
493 | u32 volt_min; /* microvolts */ | ||
494 | u32 volt_max; | ||
495 | u8 fanspeed; | ||
456 | 496 | ||
457 | u16 memscript; | 497 | u16 memscript; |
458 | struct nouveau_pm_memtiming *timing; | 498 | struct nouveau_pm_memtiming *timing; |
@@ -496,6 +536,11 @@ struct nouveau_pm_engine { | |||
496 | void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, | 536 | void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, |
497 | u32 id, int khz); | 537 | u32 id, int khz); |
498 | void (*clock_set)(struct drm_device *, void *); | 538 | void (*clock_set)(struct drm_device *, void *); |
539 | |||
540 | int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *); | ||
541 | void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *); | ||
542 | void (*clocks_set)(struct drm_device *, void *); | ||
543 | |||
499 | int (*voltage_get)(struct drm_device *); | 544 | int (*voltage_get)(struct drm_device *); |
500 | int (*voltage_set)(struct drm_device *, int voltage); | 545 | int (*voltage_set)(struct drm_device *, int voltage); |
501 | int (*fanspeed_get)(struct drm_device *); | 546 | int (*fanspeed_get)(struct drm_device *); |
@@ -504,7 +549,7 @@ struct nouveau_pm_engine { | |||
504 | }; | 549 | }; |
505 | 550 | ||
506 | struct nouveau_vram_engine { | 551 | struct nouveau_vram_engine { |
507 | struct nouveau_mm *mm; | 552 | struct nouveau_mm mm; |
508 | 553 | ||
509 | int (*init)(struct drm_device *); | 554 | int (*init)(struct drm_device *); |
510 | void (*takedown)(struct drm_device *dev); | 555 | void (*takedown)(struct drm_device *dev); |
@@ -623,6 +668,7 @@ enum nouveau_card_type { | |||
623 | NV_40 = 0x40, | 668 | NV_40 = 0x40, |
624 | NV_50 = 0x50, | 669 | NV_50 = 0x50, |
625 | NV_C0 = 0xc0, | 670 | NV_C0 = 0xc0, |
671 | NV_D0 = 0xd0 | ||
626 | }; | 672 | }; |
627 | 673 | ||
628 | struct drm_nouveau_private { | 674 | struct drm_nouveau_private { |
@@ -633,8 +679,8 @@ struct drm_nouveau_private { | |||
633 | enum nouveau_card_type card_type; | 679 | enum nouveau_card_type card_type; |
634 | /* exact chipset, derived from NV_PMC_BOOT_0 */ | 680 | /* exact chipset, derived from NV_PMC_BOOT_0 */ |
635 | int chipset; | 681 | int chipset; |
636 | int stepping; | ||
637 | int flags; | 682 | int flags; |
683 | u32 crystal; | ||
638 | 684 | ||
639 | void __iomem *mmio; | 685 | void __iomem *mmio; |
640 | 686 | ||
@@ -721,7 +767,6 @@ struct drm_nouveau_private { | |||
721 | uint64_t vram_size; | 767 | uint64_t vram_size; |
722 | uint64_t vram_sys_base; | 768 | uint64_t vram_sys_base; |
723 | 769 | ||
724 | uint64_t fb_phys; | ||
725 | uint64_t fb_available_size; | 770 | uint64_t fb_available_size; |
726 | uint64_t fb_mappable_pages; | 771 | uint64_t fb_mappable_pages; |
727 | uint64_t fb_aper_free; | 772 | uint64_t fb_aper_free; |
@@ -784,6 +829,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) | |||
784 | } | 829 | } |
785 | 830 | ||
786 | /* nouveau_drv.c */ | 831 | /* nouveau_drv.c */ |
832 | extern int nouveau_modeset; | ||
787 | extern int nouveau_agpmode; | 833 | extern int nouveau_agpmode; |
788 | extern int nouveau_duallink; | 834 | extern int nouveau_duallink; |
789 | extern int nouveau_uscript_lvds; | 835 | extern int nouveau_uscript_lvds; |
@@ -824,6 +870,8 @@ extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, | |||
824 | uint32_t reg, uint32_t mask, uint32_t val); | 870 | uint32_t reg, uint32_t mask, uint32_t val); |
825 | extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, | 871 | extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, |
826 | uint32_t reg, uint32_t mask, uint32_t val); | 872 | uint32_t reg, uint32_t mask, uint32_t val); |
873 | extern bool nouveau_wait_cb(struct drm_device *, u64 timeout, | ||
874 | bool (*cond)(void *), void *); | ||
827 | extern bool nouveau_wait_for_idle(struct drm_device *); | 875 | extern bool nouveau_wait_for_idle(struct drm_device *); |
828 | extern int nouveau_card_init(struct drm_device *); | 876 | extern int nouveau_card_init(struct drm_device *); |
829 | 877 | ||
@@ -1006,15 +1054,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector | |||
1006 | 1054 | ||
1007 | /* nouveau_backlight.c */ | 1055 | /* nouveau_backlight.c */ |
1008 | #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT | 1056 | #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT |
1009 | extern int nouveau_backlight_init(struct drm_connector *); | 1057 | extern int nouveau_backlight_init(struct drm_device *); |
1010 | extern void nouveau_backlight_exit(struct drm_connector *); | 1058 | extern void nouveau_backlight_exit(struct drm_device *); |
1011 | #else | 1059 | #else |
1012 | static inline int nouveau_backlight_init(struct drm_connector *dev) | 1060 | static inline int nouveau_backlight_init(struct drm_device *dev) |
1013 | { | 1061 | { |
1014 | return 0; | 1062 | return 0; |
1015 | } | 1063 | } |
1016 | 1064 | ||
1017 | static inline void nouveau_backlight_exit(struct drm_connector *dev) { } | 1065 | static inline void nouveau_backlight_exit(struct drm_device *dev) { } |
1018 | #endif | 1066 | #endif |
1019 | 1067 | ||
1020 | /* nouveau_bios.c */ | 1068 | /* nouveau_bios.c */ |
@@ -1022,7 +1070,8 @@ extern int nouveau_bios_init(struct drm_device *); | |||
1022 | extern void nouveau_bios_takedown(struct drm_device *dev); | 1070 | extern void nouveau_bios_takedown(struct drm_device *dev); |
1023 | extern int nouveau_run_vbios_init(struct drm_device *); | 1071 | extern int nouveau_run_vbios_init(struct drm_device *); |
1024 | extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table, | 1072 | extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table, |
1025 | struct dcb_entry *); | 1073 | struct dcb_entry *, int crtc); |
1074 | extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table); | ||
1026 | extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, | 1075 | extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, |
1027 | enum dcb_gpio_tag); | 1076 | enum dcb_gpio_tag); |
1028 | extern struct dcb_connector_table_entry * | 1077 | extern struct dcb_connector_table_entry * |
@@ -1030,11 +1079,8 @@ nouveau_bios_connector_entry(struct drm_device *, int index); | |||
1030 | extern u32 get_pll_register(struct drm_device *, enum pll_types); | 1079 | extern u32 get_pll_register(struct drm_device *, enum pll_types); |
1031 | extern int get_pll_limits(struct drm_device *, uint32_t limit_match, | 1080 | extern int get_pll_limits(struct drm_device *, uint32_t limit_match, |
1032 | struct pll_lims *); | 1081 | struct pll_lims *); |
1033 | extern int nouveau_bios_run_display_table(struct drm_device *, | 1082 | extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk, |
1034 | struct dcb_entry *, | 1083 | struct dcb_entry *, int crtc); |
1035 | uint32_t script, int pxclk); | ||
1036 | extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *, | ||
1037 | int *length); | ||
1038 | extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); | 1084 | extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); |
1039 | extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *); | 1085 | extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *); |
1040 | extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, | 1086 | extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, |
@@ -1043,6 +1089,7 @@ extern int run_tmds_table(struct drm_device *, struct dcb_entry *, | |||
1043 | int head, int pxclk); | 1089 | int head, int pxclk); |
1044 | extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head, | 1090 | extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head, |
1045 | enum LVDS_script, int pxclk); | 1091 | enum LVDS_script, int pxclk); |
1092 | bool bios_encoder_match(struct dcb_entry *, u32 hash); | ||
1046 | 1093 | ||
1047 | /* nouveau_ttm.c */ | 1094 | /* nouveau_ttm.c */ |
1048 | int nouveau_ttm_global_init(struct drm_nouveau_private *); | 1095 | int nouveau_ttm_global_init(struct drm_nouveau_private *); |
@@ -1053,7 +1100,9 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); | |||
1053 | int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | 1100 | int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, |
1054 | uint8_t *data, int data_nr); | 1101 | uint8_t *data, int data_nr); |
1055 | bool nouveau_dp_detect(struct drm_encoder *); | 1102 | bool nouveau_dp_detect(struct drm_encoder *); |
1056 | bool nouveau_dp_link_train(struct drm_encoder *); | 1103 | bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate); |
1104 | void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32); | ||
1105 | u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **); | ||
1057 | 1106 | ||
1058 | /* nv04_fb.c */ | 1107 | /* nv04_fb.c */ |
1059 | extern int nv04_fb_init(struct drm_device *); | 1108 | extern int nv04_fb_init(struct drm_device *); |
@@ -1179,8 +1228,8 @@ extern int nva3_copy_create(struct drm_device *dev); | |||
1179 | /* nvc0_copy.c */ | 1228 | /* nvc0_copy.c */ |
1180 | extern int nvc0_copy_create(struct drm_device *dev, int engine); | 1229 | extern int nvc0_copy_create(struct drm_device *dev, int engine); |
1181 | 1230 | ||
1182 | /* nv40_mpeg.c */ | 1231 | /* nv31_mpeg.c */ |
1183 | extern int nv40_mpeg_create(struct drm_device *dev); | 1232 | extern int nv31_mpeg_create(struct drm_device *dev); |
1184 | 1233 | ||
1185 | /* nv50_mpeg.c */ | 1234 | /* nv50_mpeg.c */ |
1186 | extern int nv50_mpeg_create(struct drm_device *dev); | 1235 | extern int nv50_mpeg_create(struct drm_device *dev); |
@@ -1265,6 +1314,11 @@ extern int nv04_display_create(struct drm_device *); | |||
1265 | extern int nv04_display_init(struct drm_device *); | 1314 | extern int nv04_display_init(struct drm_device *); |
1266 | extern void nv04_display_destroy(struct drm_device *); | 1315 | extern void nv04_display_destroy(struct drm_device *); |
1267 | 1316 | ||
1317 | /* nvd0_display.c */ | ||
1318 | extern int nvd0_display_create(struct drm_device *); | ||
1319 | extern int nvd0_display_init(struct drm_device *); | ||
1320 | extern void nvd0_display_destroy(struct drm_device *); | ||
1321 | |||
1268 | /* nv04_crtc.c */ | 1322 | /* nv04_crtc.c */ |
1269 | extern int nv04_crtc_create(struct drm_device *, int index); | 1323 | extern int nv04_crtc_create(struct drm_device *, int index); |
1270 | 1324 | ||
@@ -1374,6 +1428,8 @@ int nv50_gpio_init(struct drm_device *dev); | |||
1374 | void nv50_gpio_fini(struct drm_device *dev); | 1428 | void nv50_gpio_fini(struct drm_device *dev); |
1375 | int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | 1429 | int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); |
1376 | int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | 1430 | int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); |
1431 | int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | ||
1432 | int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | ||
1377 | int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, | 1433 | int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, |
1378 | void (*)(void *, int), void *); | 1434 | void (*)(void *, int), void *); |
1379 | void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, | 1435 | void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, |
@@ -1448,6 +1504,8 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) | |||
1448 | nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val)) | 1504 | nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val)) |
1449 | #define nv_wait_ne(dev, reg, mask, val) \ | 1505 | #define nv_wait_ne(dev, reg, mask, val) \ |
1450 | nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val)) | 1506 | nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val)) |
1507 | #define nv_wait_cb(dev, func, data) \ | ||
1508 | nouveau_wait_cb(dev, 2000000000ULL, (func), (data)) | ||
1451 | 1509 | ||
1452 | /* PRAMIN access */ | 1510 | /* PRAMIN access */ |
1453 | static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) | 1511 | static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) |
@@ -1514,6 +1572,7 @@ enum { | |||
1514 | NOUVEAU_REG_DEBUG_RMVIO = 0x80, | 1572 | NOUVEAU_REG_DEBUG_RMVIO = 0x80, |
1515 | NOUVEAU_REG_DEBUG_VGAATTR = 0x100, | 1573 | NOUVEAU_REG_DEBUG_VGAATTR = 0x100, |
1516 | NOUVEAU_REG_DEBUG_EVO = 0x200, | 1574 | NOUVEAU_REG_DEBUG_EVO = 0x200, |
1575 | NOUVEAU_REG_DEBUG_AUXCH = 0x400 | ||
1517 | }; | 1576 | }; |
1518 | 1577 | ||
1519 | #define NV_REG_DEBUG(type, dev, fmt, arg...) do { \ | 1578 | #define NV_REG_DEBUG(type, dev, fmt, arg...) do { \ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index ae69b61d93db..e5d6e3faff3d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h | |||
@@ -49,17 +49,17 @@ struct nouveau_encoder { | |||
49 | 49 | ||
50 | union { | 50 | union { |
51 | struct { | 51 | struct { |
52 | int mc_unknown; | 52 | u8 dpcd[8]; |
53 | uint32_t unk0; | ||
54 | uint32_t unk1; | ||
55 | int dpcd_version; | ||
56 | int link_nr; | 53 | int link_nr; |
57 | int link_bw; | 54 | int link_bw; |
58 | bool enhanced_frame; | 55 | u32 datarate; |
59 | } dp; | 56 | } dp; |
60 | }; | 57 | }; |
61 | }; | 58 | }; |
62 | 59 | ||
60 | struct nouveau_encoder * | ||
61 | find_encoder(struct drm_connector *connector, int type); | ||
62 | |||
63 | static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc) | 63 | static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc) |
64 | { | 64 | { |
65 | struct drm_encoder_slave *slave = to_encoder_slave(enc); | 65 | struct drm_encoder_slave *slave = to_encoder_slave(enc); |
@@ -83,21 +83,4 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder); | |||
83 | int nv50_sor_create(struct drm_connector *, struct dcb_entry *); | 83 | int nv50_sor_create(struct drm_connector *, struct dcb_entry *); |
84 | int nv50_dac_create(struct drm_connector *, struct dcb_entry *); | 84 | int nv50_dac_create(struct drm_connector *, struct dcb_entry *); |
85 | 85 | ||
86 | struct bit_displayport_encoder_table { | ||
87 | uint32_t match; | ||
88 | uint8_t record_nr; | ||
89 | uint8_t unknown; | ||
90 | uint16_t script0; | ||
91 | uint16_t script1; | ||
92 | uint16_t unknown_table; | ||
93 | } __attribute__ ((packed)); | ||
94 | |||
95 | struct bit_displayport_encoder_table_entry { | ||
96 | uint8_t vs_level; | ||
97 | uint8_t pre_level; | ||
98 | uint8_t reg0; | ||
99 | uint8_t reg1; | ||
100 | uint8_t reg2; | ||
101 | } __attribute__ ((packed)); | ||
102 | |||
103 | #endif /* __NOUVEAU_ENCODER_H__ */ | 86 | #endif /* __NOUVEAU_ENCODER_H__ */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index c919cfc8f2fd..81116cfea275 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -519,7 +519,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
519 | if (USE_SEMA(dev) && dev_priv->chipset < 0x84) { | 519 | if (USE_SEMA(dev) && dev_priv->chipset < 0x84) { |
520 | struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; | 520 | struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; |
521 | 521 | ||
522 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 522 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, |
523 | mem->start << PAGE_SHIFT, | 523 | mem->start << PAGE_SHIFT, |
524 | mem->size, NV_MEM_ACCESS_RW, | 524 | mem->size, NV_MEM_ACCESS_RW, |
525 | NV_MEM_TARGET_VRAM, &obj); | 525 | NV_MEM_TARGET_VRAM, &obj); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index cb389d014326..f6a27fabcfe0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
@@ -107,6 +107,13 @@ nv4e_i2c_getsda(void *data) | |||
107 | return !!((nv_rd32(dev, i2c->rd) >> 16) & 8); | 107 | return !!((nv_rd32(dev, i2c->rd) >> 16) & 8); |
108 | } | 108 | } |
109 | 109 | ||
110 | static const uint32_t nv50_i2c_port[] = { | ||
111 | 0x00e138, 0x00e150, 0x00e168, 0x00e180, | ||
112 | 0x00e254, 0x00e274, 0x00e764, 0x00e780, | ||
113 | 0x00e79c, 0x00e7b8 | ||
114 | }; | ||
115 | #define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port) | ||
116 | |||
110 | static int | 117 | static int |
111 | nv50_i2c_getscl(void *data) | 118 | nv50_i2c_getscl(void *data) |
112 | { | 119 | { |
@@ -130,28 +137,32 @@ static void | |||
130 | nv50_i2c_setscl(void *data, int state) | 137 | nv50_i2c_setscl(void *data, int state) |
131 | { | 138 | { |
132 | struct nouveau_i2c_chan *i2c = data; | 139 | struct nouveau_i2c_chan *i2c = data; |
133 | struct drm_device *dev = i2c->dev; | ||
134 | 140 | ||
135 | nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0)); | 141 | nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0)); |
136 | } | 142 | } |
137 | 143 | ||
138 | static void | 144 | static void |
139 | nv50_i2c_setsda(void *data, int state) | 145 | nv50_i2c_setsda(void *data, int state) |
140 | { | 146 | { |
141 | struct nouveau_i2c_chan *i2c = data; | 147 | struct nouveau_i2c_chan *i2c = data; |
142 | struct drm_device *dev = i2c->dev; | ||
143 | 148 | ||
144 | nv_wr32(dev, i2c->wr, | 149 | nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0)); |
145 | (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0)); | ||
146 | i2c->data = state; | 150 | i2c->data = state; |
147 | } | 151 | } |
148 | 152 | ||
149 | static const uint32_t nv50_i2c_port[] = { | 153 | static int |
150 | 0x00e138, 0x00e150, 0x00e168, 0x00e180, | 154 | nvd0_i2c_getscl(void *data) |
151 | 0x00e254, 0x00e274, 0x00e764, 0x00e780, | 155 | { |
152 | 0x00e79c, 0x00e7b8 | 156 | struct nouveau_i2c_chan *i2c = data; |
153 | }; | 157 | return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10); |
154 | #define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port) | 158 | } |
159 | |||
160 | static int | ||
161 | nvd0_i2c_getsda(void *data) | ||
162 | { | ||
163 | struct nouveau_i2c_chan *i2c = data; | ||
164 | return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20); | ||
165 | } | ||
155 | 166 | ||
156 | int | 167 | int |
157 | nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) | 168 | nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) |
@@ -163,7 +174,8 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) | |||
163 | if (entry->chan) | 174 | if (entry->chan) |
164 | return -EEXIST; | 175 | return -EEXIST; |
165 | 176 | ||
166 | if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) { | 177 | if (dev_priv->card_type >= NV_50 && |
178 | dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) { | ||
167 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); | 179 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); |
168 | return -EINVAL; | 180 | return -EINVAL; |
169 | } | 181 | } |
@@ -192,10 +204,17 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) | |||
192 | case 5: | 204 | case 5: |
193 | i2c->bit.setsda = nv50_i2c_setsda; | 205 | i2c->bit.setsda = nv50_i2c_setsda; |
194 | i2c->bit.setscl = nv50_i2c_setscl; | 206 | i2c->bit.setscl = nv50_i2c_setscl; |
195 | i2c->bit.getsda = nv50_i2c_getsda; | 207 | if (dev_priv->card_type < NV_D0) { |
196 | i2c->bit.getscl = nv50_i2c_getscl; | 208 | i2c->bit.getsda = nv50_i2c_getsda; |
197 | i2c->rd = nv50_i2c_port[entry->read]; | 209 | i2c->bit.getscl = nv50_i2c_getscl; |
198 | i2c->wr = i2c->rd; | 210 | i2c->rd = nv50_i2c_port[entry->read]; |
211 | i2c->wr = i2c->rd; | ||
212 | } else { | ||
213 | i2c->bit.getsda = nvd0_i2c_getsda; | ||
214 | i2c->bit.getscl = nvd0_i2c_getscl; | ||
215 | i2c->rd = 0x00d014 + (entry->read * 0x20); | ||
216 | i2c->wr = i2c->rd; | ||
217 | } | ||
199 | break; | 218 | break; |
200 | case 6: | 219 | case 6: |
201 | i2c->rd = entry->read; | 220 | i2c->rd = entry->read; |
@@ -267,7 +286,10 @@ nouveau_i2c_find(struct drm_device *dev, int index) | |||
267 | val = 0xe001; | 286 | val = 0xe001; |
268 | } | 287 | } |
269 | 288 | ||
270 | nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); | 289 | /* nfi, but neither auxch nor i2c works if it's 1 */
290 | nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000); | ||
291 | /* nfi, but switches auxch vs normal i2c */ | ||
292 | nv_mask(dev, reg + 0x00, 0x0000f003, val); | ||
271 | } | 293 | } |
272 | 294 | ||
273 | if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) | 295 | if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) |
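NVD0 boards keep their bit-banged ports in a flat array at 0x00d014 with a 0x20 stride, sense SCL/SDA on read bits 0x10/0x20, and the write paths above now use read-modify-write (nv_mask) rather than reconstructing the register from the read port. A standalone sketch of that addressing against a toy register file; nv_rd32/nv_mask here are local stand-ins, not the driver's helpers.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x10000];          /* toy MMIO space standing in for the GPU BAR */

static uint32_t nv_rd32(uint32_t reg) { return regs[reg >> 2]; }

/* read-modify-write helper matching the nv_mask() pattern in the diff */
static uint32_t nv_mask(uint32_t reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = nv_rd32(reg);
	regs[reg >> 2] = (tmp & ~mask) | val;
	return tmp;
}

/* NVD0-style port addressing from the diff: 0x00d014 + port * 0x20 */
static uint32_t nvd0_i2c_port(int idx) { return 0x00d014 + idx * 0x20; }

int main(void)
{
	uint32_t port = nvd0_i2c_port(2);     /* 0x00d054 */

	nv_mask(port, 0x00000006, 4 | 2);     /* drive SDA high, leave SCL alone */
	regs[port >> 2] |= 0x30;              /* pretend the pads read back high */

	printf("port 0x%06x scl=%d sda=%d\n", port,
	       !!(nv_rd32(port) & 0x10), !!(nv_rd32(port) & 0x20));
	return 0;
}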
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index f9ae2fc3d6f1..36bec4807701 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -408,8 +408,6 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
408 | if (ret) | 408 | if (ret) |
409 | return ret; | 409 | return ret; |
410 | 410 | ||
411 | dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); | ||
412 | |||
413 | ret = nouveau_ttm_global_init(dev_priv); | 411 | ret = nouveau_ttm_global_init(dev_priv); |
414 | if (ret) | 412 | if (ret) |
415 | return ret; | 413 | return ret; |
@@ -504,35 +502,146 @@ nouveau_mem_gart_init(struct drm_device *dev) | |||
504 | return 0; | 502 | return 0; |
505 | } | 503 | } |
506 | 504 | ||
505 | /* XXX: For now a dummy; more samples (possibly even a card using this | ||
506 | * format) are required. Called from nouveau_perf.c. */ | ||
507 | void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | ||
508 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | ||
509 | struct nouveau_pm_memtiming *timing) { | ||
510 | |||
511 | NV_DEBUG(dev, "Timing entry format unknown, please contact nouveau developers\n"); | ||
512 | } | ||
513 | |||
514 | void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | ||
515 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | ||
516 | struct nouveau_pm_memtiming *timing) { | ||
517 | |||
518 | timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP); | ||
519 | |||
520 | /* XXX: I don't trust the -1's and +1's... they must come | ||
521 | * from somewhere! */ | ||
522 | timing->reg_1 = (e->tWR + 2 + magic_number) << 24 | | ||
523 | 1 << 16 | | ||
524 | (e->tUNK_1 + 2 + magic_number) << 8 | | ||
525 | (e->tCL + 2 - magic_number); | ||
526 | timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10); | ||
527 | timing->reg_2 |= 0x20200000; | ||
528 | |||
529 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id, | ||
530 | timing->reg_0, timing->reg_1, timing->reg_2); | ||
531 | } | ||
532 | |||
533 | void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr, | ||
534 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, struct nouveau_pm_memtiming *timing) { | ||
535 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
536 | |||
537 | uint8_t unk18 = 1, | ||
538 | unk19 = 1, | ||
539 | unk20 = 0, | ||
540 | unk21 = 0; | ||
541 | |||
542 | switch (min(hdr->entry_len, (u8) 22)) { | ||
543 | case 22: | ||
544 | unk21 = e->tUNK_21; | ||
545 | case 21: | ||
546 | unk20 = e->tUNK_20; | ||
547 | case 20: | ||
548 | unk19 = e->tUNK_19; | ||
549 | case 19: | ||
550 | unk18 = e->tUNK_18; | ||
551 | break; | ||
552 | } | ||
553 | |||
554 | timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP); | ||
555 | |||
556 | /* XXX: I don't trust the -1's and +1's... they must come | ||
557 | * from somewhere! */ | ||
558 | timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 | | ||
559 | max(unk18, (u8) 1) << 16 | | ||
560 | (e->tUNK_1 + unk19 + 1 + magic_number) << 8; | ||
561 | if (dev_priv->chipset == 0xa8) { | ||
562 | timing->reg_1 |= (e->tCL - 1); | ||
563 | } else { | ||
564 | timing->reg_1 |= (e->tCL + 2 - magic_number); | ||
565 | } | ||
566 | timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10); | ||
567 | |||
568 | timing->reg_5 = (e->tRAS << 24 | e->tRC); | ||
569 | timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16; | ||
570 | |||
571 | if (P->version == 1) { | ||
572 | timing->reg_2 |= magic_number << 24; | ||
573 | timing->reg_3 = (0x14 + e->tCL) << 24 | | ||
574 | 0x16 << 16 | | ||
575 | (e->tCL - 1) << 8 | | ||
576 | (e->tCL - 1); | ||
577 | timing->reg_4 = (nv_rd32(dev, 0x10022c) & 0xffff0000) | e->tUNK_13 << 8 | e->tUNK_13; | ||
578 | timing->reg_5 |= (e->tCL + 2) << 8; | ||
579 | timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16; | ||
580 | } else { | ||
581 | timing->reg_2 |= (unk19 - 1) << 24; | ||
582 | /* XXX: reg_10022c for recentish cards pretty much unknown */ | ||
583 | timing->reg_3 = e->tCL - 1; | ||
584 | timing->reg_4 = (unk20 << 24 | unk21 << 16 | | ||
585 | e->tUNK_13 << 8 | e->tUNK_13); | ||
586 | /* XXX: +6? */ | ||
587 | timing->reg_5 |= (unk19 + 6) << 8; | ||
588 | |||
589 | /* XXX: reg_10023c currently unknown | ||
590 | * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ | ||
591 | timing->reg_7 = 0x202; | ||
592 | } | ||
593 | |||
594 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id, | ||
595 | timing->reg_0, timing->reg_1, | ||
596 | timing->reg_2, timing->reg_3); | ||
597 | NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", | ||
598 | timing->reg_4, timing->reg_5, | ||
599 | timing->reg_6, timing->reg_7); | ||
600 | NV_DEBUG(dev, " 240: %08x\n", timing->reg_8); | ||
601 | } | ||
602 | |||
603 | void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | ||
604 | struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) { | ||
605 | timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP); | ||
606 | timing->reg_1 = (nv_rd32(dev, 0x10f294) & 0xff000000) | (e->tUNK_11 & 0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f); | ||
607 | timing->reg_2 = (nv_rd32(dev, 0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8; | ||
608 | timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13; | ||
609 | timing->reg_4 = (nv_rd32(dev, 0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15; | ||
610 | NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id, | ||
611 | timing->reg_0, timing->reg_1, | ||
612 | timing->reg_2, timing->reg_3); | ||
613 | NV_DEBUG(dev, " 2a0: %08x %08x %08x %08x\n", | ||
614 | timing->reg_4, timing->reg_5, | ||
615 | timing->reg_6, timing->reg_7); | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * Processes the memory timing BIOS table and stores the generated | ||
620 | * register values. | ||
621 | * @pre init scripts were run, memtiming regs are initialized | ||
622 | */ | ||
507 | void | 623 | void |
508 | nouveau_mem_timing_init(struct drm_device *dev) | 624 | nouveau_mem_timing_init(struct drm_device *dev) |
509 | { | 625 | { |
510 | /* cards < NVC0 only */ | ||
511 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 626 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
512 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 627 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
513 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; | 628 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; |
514 | struct nvbios *bios = &dev_priv->vbios; | 629 | struct nvbios *bios = &dev_priv->vbios; |
515 | struct bit_entry P; | 630 | struct bit_entry P; |
516 | u8 tUNK_0, tUNK_1, tUNK_2; | 631 | struct nouveau_pm_tbl_header *hdr = NULL; |
517 | u8 tRP; /* Byte 3 */ | 632 | uint8_t magic_number; |
518 | u8 tRAS; /* Byte 5 */ | 633 | u8 *entry; |
519 | u8 tRFC; /* Byte 7 */ | 634 | int i; |
520 | u8 tRC; /* Byte 9 */ | ||
521 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; | ||
522 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; | ||
523 | u8 magic_number = 0; /* Yeah... sorry*/ | ||
524 | u8 *mem = NULL, *entry; | ||
525 | int i, recordlen, entries; | ||
526 | 635 | ||
527 | if (bios->type == NVBIOS_BIT) { | 636 | if (bios->type == NVBIOS_BIT) { |
528 | if (bit_table(dev, 'P', &P)) | 637 | if (bit_table(dev, 'P', &P)) |
529 | return; | 638 | return; |
530 | 639 | ||
531 | if (P.version == 1) | 640 | if (P.version == 1) |
532 | mem = ROMPTR(bios, P.data[4]); | 641 | hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]); |
533 | else | 642 | else |
534 | if (P.version == 2) | 643 | if (P.version == 2) |
535 | mem = ROMPTR(bios, P.data[8]); | 644 | hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]); |
536 | else { | 645 | else { |
537 | NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); | 646 | NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); |
538 | } | 647 | } |
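The per-generation parsers above keep the old fall-through switch on min(entry_len, 22), so shorter table revisions simply leave the optional bytes at their defaults. A compact, self-contained illustration of the idiom (field names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Defaults survive when the vbios entry is too short to contain the
 * optional bytes; each case intentionally falls through to the next
 * lower offset, exactly like the switch in nv50_mem_timing_entry(). */
static void parse_entry(const uint8_t *e, uint8_t len)
{
	uint8_t unk18 = 1, unk19 = 1, unk20 = 0, unk21 = 0;

	switch (len < 22 ? len : 22) {
	case 22: unk21 = e[21]; /* fall through */
	case 21: unk20 = e[20]; /* fall through */
	case 20: unk19 = e[19]; /* fall through */
	case 19: unk18 = e[18];
		break;
	}
	printf("18=%u 19=%u 20=%u 21=%u\n", unk18, unk19, unk20, unk21);
}

int main(void)
{
	uint8_t entry[22] = { [18] = 5, [19] = 6, [20] = 7, [21] = 8 };

	parse_entry(entry, 22);  /* all four read: 5 6 7 8 */
	parse_entry(entry, 20);  /* only bytes 18/19 read: 5 6 0 0 */
	return 0;
}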
@@ -541,150 +650,56 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
541 | return; | 650 | return; |
542 | } | 651 | } |
543 | 652 | ||
544 | if (!mem) { | 653 | if (!hdr) { |
545 | NV_DEBUG(dev, "memory timing table pointer invalid\n"); | 654 | NV_DEBUG(dev, "memory timing table pointer invalid\n"); |
546 | return; | 655 | return; |
547 | } | 656 | } |
548 | 657 | ||
549 | if (mem[0] != 0x10) { | 658 | if (hdr->version != 0x10) { |
550 | NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]); | 659 | NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version); |
551 | return; | 660 | return; |
552 | } | 661 | } |
553 | 662 | ||
554 | /* validate record length */ | 663 | /* validate record length */ |
555 | entries = mem[2]; | 664 | if (hdr->entry_len < 15) { |
556 | recordlen = mem[3]; | 665 | NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len); |
557 | if (recordlen < 15) { | ||
558 | NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]); | ||
559 | return; | 666 | return; |
560 | } | 667 | } |
561 | 668 | ||
562 | /* parse vbios entries into common format */ | 669 | /* parse vbios entries into common format */ |
563 | memtimings->timing = | 670 | memtimings->timing = |
564 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); | 671 | kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL); |
565 | if (!memtimings->timing) | 672 | if (!memtimings->timing) |
566 | return; | 673 | return; |
567 | 674 | ||
568 | /* Get "some number" from the timing reg for NV_40 and NV_50 | 675 | /* Get "some number" from the timing reg for NV_40 and NV_50 |
569 | * Used in calculations later */ | 676 | * Used in calculations later... source unknown */ |
570 | if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) { | 677 | magic_number = 0; |
678 | if (P.version == 1) { | ||
571 | magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24; | 679 | magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24; |
572 | } | 680 | } |
573 | 681 | ||
574 | entry = mem + mem[1]; | 682 | entry = (u8 *)hdr + hdr->header_len;
575 | for (i = 0; i < entries; i++, entry += recordlen) { | 683 | for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) { |
576 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; | 684 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; |
577 | if (entry[0] == 0) | 685 | if (entry[0] == 0) |
578 | continue; | 686 | continue; |
579 | 687 | ||
580 | tUNK_18 = 1; | ||
581 | tUNK_19 = 1; | ||
582 | tUNK_20 = 0; | ||
583 | tUNK_21 = 0; | ||
584 | switch (min(recordlen, 22)) { | ||
585 | case 22: | ||
586 | tUNK_21 = entry[21]; | ||
587 | case 21: | ||
588 | tUNK_20 = entry[20]; | ||
589 | case 20: | ||
590 | tUNK_19 = entry[19]; | ||
591 | case 19: | ||
592 | tUNK_18 = entry[18]; | ||
593 | default: | ||
594 | tUNK_0 = entry[0]; | ||
595 | tUNK_1 = entry[1]; | ||
596 | tUNK_2 = entry[2]; | ||
597 | tRP = entry[3]; | ||
598 | tRAS = entry[5]; | ||
599 | tRFC = entry[7]; | ||
600 | tRC = entry[9]; | ||
601 | tUNK_10 = entry[10]; | ||
602 | tUNK_11 = entry[11]; | ||
603 | tUNK_12 = entry[12]; | ||
604 | tUNK_13 = entry[13]; | ||
605 | tUNK_14 = entry[14]; | ||
606 | break; | ||
607 | } | ||
608 | |||
609 | timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP); | ||
610 | |||
611 | /* XXX: I don't trust the -1's and +1's... they must come | ||
612 | * from somewhere! */ | ||
613 | timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | | ||
614 | max(tUNK_18, (u8) 1) << 16 | | ||
615 | (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; | ||
616 | if (dev_priv->chipset == 0xa8) { | ||
617 | timing->reg_100224 |= (tUNK_2 - 1); | ||
618 | } else { | ||
619 | timing->reg_100224 |= (tUNK_2 + 2 - magic_number); | ||
620 | } | ||
621 | |||
622 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); | ||
623 | if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) | ||
624 | timing->reg_100228 |= (tUNK_19 - 1) << 24; | ||
625 | else | ||
626 | timing->reg_100228 |= magic_number << 24; | ||
627 | |||
628 | if (dev_priv->card_type == NV_40) { | ||
629 | /* NV40: don't know what the rest of the regs are.. | ||
630 | * And don't need to know either */ | ||
631 | timing->reg_100228 |= 0x20200000; | ||
632 | } else if (dev_priv->card_type >= NV_50) { | ||
633 | if (dev_priv->chipset < 0x98 || | ||
634 | (dev_priv->chipset == 0x98 && | ||
635 | dev_priv->stepping <= 0xa1)) { | ||
636 | timing->reg_10022c = (0x14 + tUNK_2) << 24 | | ||
637 | 0x16 << 16 | | ||
638 | (tUNK_2 - 1) << 8 | | ||
639 | (tUNK_2 - 1); | ||
640 | } else { | ||
641 | /* XXX: reg_10022c for recentish cards */ | ||
642 | timing->reg_10022c = tUNK_2 - 1; | ||
643 | } | ||
644 | |||
645 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | | ||
646 | tUNK_13 << 8 | tUNK_13); | ||
647 | |||
648 | timing->reg_100234 = (tRAS << 24 | tRC); | ||
649 | timing->reg_100234 += max(tUNK_10, tUNK_11) << 16; | ||
650 | |||
651 | if (dev_priv->chipset < 0x98 || | ||
652 | (dev_priv->chipset == 0x98 && | ||
653 | dev_priv->stepping <= 0xa1)) { | ||
654 | timing->reg_100234 |= (tUNK_2 + 2) << 8; | ||
655 | } else { | ||
656 | /* XXX: +6? */ | ||
657 | timing->reg_100234 |= (tUNK_19 + 6) << 8; | ||
658 | } | ||
659 | |||
660 | /* XXX; reg_100238 | ||
661 | * reg_100238: 0x00?????? */ | ||
662 | timing->reg_10023c = 0x202; | ||
663 | if (dev_priv->chipset < 0x98 || | ||
664 | (dev_priv->chipset == 0x98 && | ||
665 | dev_priv->stepping <= 0xa1)) { | ||
666 | timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; | ||
667 | } else { | ||
668 | /* XXX: reg_10023c | ||
669 | * currently unknown | ||
670 | * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ | ||
671 | } | ||
672 | |||
673 | /* XXX: reg_100240? */ | ||
674 | } | ||
675 | timing->id = i; | 688 | timing->id = i; |
676 | 689 | timing->WR = entry[0]; | |
677 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, | 690 | timing->CL = entry[2]; |
678 | timing->reg_100220, timing->reg_100224, | 691 | |
679 | timing->reg_100228, timing->reg_10022c); | 692 | if (dev_priv->card_type <= NV_40) {
680 | NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", | 693 | nv40_mem_timing_entry(dev, hdr, (struct nouveau_pm_tbl_entry *)entry, magic_number, &pm->memtimings.timing[i]);
681 | timing->reg_100230, timing->reg_100234, | 694 | } else if (dev_priv->card_type == NV_50) {
682 | timing->reg_100238, timing->reg_10023c); | 695 | nv50_mem_timing_entry(dev, &P, hdr, (struct nouveau_pm_tbl_entry *)entry, magic_number, &pm->memtimings.timing[i]);
683 | NV_DEBUG(dev, " 240: %08x\n", timing->reg_100240); | 696 | } else if (dev_priv->card_type == NV_C0) {
697 | nvc0_mem_timing_entry(dev, hdr, (struct nouveau_pm_tbl_entry *)entry, &pm->memtimings.timing[i]); | ||
698 | } | ||
684 | } | 699 | } |
685 | 700 | ||
686 | memtimings->nr_timing = entries; | 701 | memtimings->nr_timing = hdr->entry_cnt; |
687 | memtimings->supported = (dev_priv->chipset <= 0x98); | 702 | memtimings->supported = P.version == 1; |
688 | } | 703 | } |
689 | 704 | ||
690 | void | 705 | void |
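The rewrite replaces raw mem[0..3] indexing with a header struct; walking the table is then plain pointer arithmetic off header_len and entry_len. A sketch under the layout the diff implies (version, header length, entry count, entry length in the first four bytes); the struct here is a local stand-in for nouveau_pm_tbl_header:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, mirroring the bytes the old code read as mem[0..3]:
 * version, header length, entry count, entry length. */
struct tbl_header {
	uint8_t version;
	uint8_t header_len;
	uint8_t entry_cnt;
	uint8_t entry_len;
};

int main(void)
{
	uint8_t blob[4 + 3 * 15] = { 0x10, 4, 3, 15 };
	const struct tbl_header *hdr = (const struct tbl_header *)blob;
	const uint8_t *entry = blob + hdr->header_len;
	int i;

	blob[4] = 0xaa; blob[4 + 15] = 0xbb; blob[4 + 30] = 0xcc;

	/* same walk as the loop in nouveau_mem_timing_init() */
	for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len)
		printf("entry %d starts with 0x%02x\n", i, entry[0]);
	return 0;
}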
@@ -693,7 +708,10 @@ nouveau_mem_timing_fini(struct drm_device *dev) | |||
693 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 708 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
694 | struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; | 709 | struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; |
695 | 710 | ||
696 | kfree(mem->timing); | 711 | if (mem->timing) {
712 | kfree(mem->timing); | ||
713 | mem->timing = NULL; | ||
714 | } | ||
697 | } | 715 | } |
698 | 716 | ||
699 | static int | 717 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 1640dec3b823..b29ffb3d1408 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "nouveau_mm.h" | 27 | #include "nouveau_mm.h" |
28 | 28 | ||
29 | static inline void | 29 | static inline void |
30 | region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) | 30 | region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a) |
31 | { | 31 | { |
32 | list_del(&a->nl_entry); | 32 | list_del(&a->nl_entry); |
33 | list_del(&a->fl_entry); | 33 | list_del(&a->fl_entry); |
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) | |||
35 | } | 35 | } |
36 | 36 | ||
37 | static struct nouveau_mm_node * | 37 | static struct nouveau_mm_node * |
38 | region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) | 38 | region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) |
39 | { | 39 | { |
40 | struct nouveau_mm_node *b; | 40 | struct nouveau_mm_node *b; |
41 | 41 | ||
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) | |||
57 | return b; | 57 | return b; |
58 | } | 58 | } |
59 | 59 | ||
60 | #define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \ | 60 | #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ |
61 | list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) | 61 | list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) |
62 | 62 | ||
63 | void | 63 | void |
64 | nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) | 64 | nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this) |
65 | { | 65 | { |
66 | struct nouveau_mm_node *prev = node(this, prev); | 66 | struct nouveau_mm_node *prev = node(this, prev); |
67 | struct nouveau_mm_node *next = node(this, next); | 67 | struct nouveau_mm_node *next = node(this, next); |
68 | 68 | ||
69 | list_add(&this->fl_entry, &rmm->free); | 69 | list_add(&this->fl_entry, &mm->free); |
70 | this->type = 0; | 70 | this->type = 0; |
71 | 71 | ||
72 | if (prev && prev->type == 0) { | 72 | if (prev && prev->type == 0) { |
73 | prev->length += this->length; | 73 | prev->length += this->length; |
74 | region_put(rmm, this); | 74 | region_put(mm, this); |
75 | this = prev; | 75 | this = prev; |
76 | } | 76 | } |
77 | 77 | ||
78 | if (next && next->type == 0) { | 78 | if (next && next->type == 0) { |
79 | next->offset = this->offset; | 79 | next->offset = this->offset; |
80 | next->length += this->length; | 80 | next->length += this->length; |
81 | region_put(rmm, this); | 81 | region_put(mm, this); |
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | int | 85 | int |
86 | nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | 86 | nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc, |
87 | u32 align, struct nouveau_mm_node **pnode) | 87 | u32 align, struct nouveau_mm_node **pnode) |
88 | { | 88 | { |
89 | struct nouveau_mm_node *prev, *this, *next; | 89 | struct nouveau_mm_node *prev, *this, *next; |
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
92 | u32 splitoff; | 92 | u32 splitoff; |
93 | u32 s, e; | 93 | u32 s, e; |
94 | 94 | ||
95 | list_for_each_entry(this, &rmm->free, fl_entry) { | 95 | list_for_each_entry(this, &mm->free, fl_entry) { |
96 | e = this->offset + this->length; | 96 | e = this->offset + this->length; |
97 | s = this->offset; | 97 | s = this->offset; |
98 | 98 | ||
99 | prev = node(this, prev); | 99 | prev = node(this, prev); |
100 | if (prev && prev->type != type) | 100 | if (prev && prev->type != type) |
101 | s = roundup(s, rmm->block_size); | 101 | s = roundup(s, mm->block_size); |
102 | 102 | ||
103 | next = node(this, next); | 103 | next = node(this, next); |
104 | if (next && next->type != type) | 104 | if (next && next->type != type) |
105 | e = rounddown(e, rmm->block_size); | 105 | e = rounddown(e, mm->block_size); |
106 | 106 | ||
107 | s = (s + align_mask) & ~align_mask; | 107 | s = (s + align_mask) & ~align_mask; |
108 | e &= ~align_mask; | 108 | e &= ~align_mask; |
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
110 | continue; | 110 | continue; |
111 | 111 | ||
112 | splitoff = s - this->offset; | 112 | splitoff = s - this->offset; |
113 | if (splitoff && !region_split(rmm, this, splitoff)) | 113 | if (splitoff && !region_split(mm, this, splitoff)) |
114 | return -ENOMEM; | 114 | return -ENOMEM; |
115 | 115 | ||
116 | this = region_split(rmm, this, min(size, e - s)); | 116 | this = region_split(mm, this, min(size, e - s)); |
117 | if (!this) | 117 | if (!this) |
118 | return -ENOMEM; | 118 | return -ENOMEM; |
119 | 119 | ||
@@ -127,52 +127,49 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
127 | } | 127 | } |
128 | 128 | ||
129 | int | 129 | int |
130 | nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block) | 130 | nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) |
131 | { | 131 | { |
132 | struct nouveau_mm *rmm; | 132 | struct nouveau_mm_node *node; |
133 | struct nouveau_mm_node *heap; | 133 | |
134 | if (block) { | ||
135 | mutex_init(&mm->mutex); | ||
136 | INIT_LIST_HEAD(&mm->nodes); | ||
137 | INIT_LIST_HEAD(&mm->free); | ||
138 | mm->block_size = block; | ||
139 | mm->heap_nodes = 0; | ||
140 | } | ||
134 | 141 | ||
135 | heap = kzalloc(sizeof(*heap), GFP_KERNEL); | 142 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
136 | if (!heap) | 143 | if (!node) |
137 | return -ENOMEM; | 144 | return -ENOMEM; |
138 | heap->offset = roundup(offset, block); | 145 | node->offset = roundup(offset, mm->block_size); |
139 | heap->length = rounddown(offset + length, block) - heap->offset; | 146 | node->length = rounddown(offset + length, mm->block_size) - node->offset; |
140 | 147 | ||
141 | rmm = kzalloc(sizeof(*rmm), GFP_KERNEL); | 148 | list_add_tail(&node->nl_entry, &mm->nodes); |
142 | if (!rmm) { | 149 | list_add_tail(&node->fl_entry, &mm->free); |
143 | kfree(heap); | 150 | mm->heap_nodes++; |
144 | return -ENOMEM; | ||
145 | } | ||
146 | rmm->block_size = block; | ||
147 | mutex_init(&rmm->mutex); | ||
148 | INIT_LIST_HEAD(&rmm->nodes); | ||
149 | INIT_LIST_HEAD(&rmm->free); | ||
150 | list_add(&heap->nl_entry, &rmm->nodes); | ||
151 | list_add(&heap->fl_entry, &rmm->free); | ||
152 | |||
153 | *prmm = rmm; | ||
154 | return 0; | 151 | return 0; |
155 | } | 152 | } |
156 | 153 | ||
157 | int | 154 | int |
158 | nouveau_mm_fini(struct nouveau_mm **prmm) | 155 | nouveau_mm_fini(struct nouveau_mm *mm) |
159 | { | 156 | { |
160 | struct nouveau_mm *rmm = *prmm; | ||
161 | struct nouveau_mm_node *node, *heap = | 157 | struct nouveau_mm_node *node, *heap = |
162 | list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); | 158 | list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry); |
163 | 159 | int nodes = 0; | |
164 | if (!list_is_singular(&rmm->nodes)) { | 160 | |
165 | printk(KERN_ERR "nouveau_mm not empty at destroy time!\n"); | 161 | list_for_each_entry(node, &mm->nodes, nl_entry) { |
166 | list_for_each_entry(node, &rmm->nodes, nl_entry) { | 162 | if (nodes++ == mm->heap_nodes) { |
167 | printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n", | 163 | printk(KERN_ERR "nouveau_mm in use at destroy time!\n"); |
168 | node->type, node->offset, node->length); | 164 | list_for_each_entry(node, &mm->nodes, nl_entry) { |
165 | printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n", | ||
166 | node->type, node->offset, node->length); | ||
167 | } | ||
168 | WARN_ON(1); | ||
169 | return -EBUSY; | ||
169 | } | 170 | } |
170 | WARN_ON(1); | ||
171 | return -EBUSY; | ||
172 | } | 171 | } |
173 | 172 | ||
174 | kfree(heap); | 173 | kfree(heap); |
175 | kfree(rmm); | ||
176 | *prmm = NULL; | ||
177 | return 0; | 174 | return 0; |
178 | } | 175 | } |
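nouveau_mm_get() trims each free-list candidate twice: block-size rounding against differently-typed neighbours (roundup/rounddown), then power-of-two alignment with mask arithmetic. A worked example of both steps, assuming align is a power of two as the mask trick requires:

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up(uint32_t x, uint32_t b)   { return (x + b - 1) / b * b; }
static uint32_t round_down(uint32_t x, uint32_t b) { return x / b * b; }

int main(void)
{
	uint32_t s = 0x1234, e = 0x5678;
	uint32_t block = 0x100;              /* mm->block_size */
	uint32_t align_mask = 0x200 - 1;     /* align = 0x200, power of two */

	/* neighbours of a different type force block-size rounding */
	s = round_up(s, block);              /* 0x1300 */
	e = round_down(e, block);            /* 0x5600 */

	/* then the mask trick aligns the start and truncates the end */
	s = (s + align_mask) & ~align_mask;  /* 0x1400 */
	e &= ~align_mask;                    /* 0x5600 */

	printf("usable [0x%x, 0x%x), %u bytes\n", s, e, e - s);
	return 0;
}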
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h index b9c016d21553..57a600c35c95 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.h +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h | |||
@@ -42,10 +42,11 @@ struct nouveau_mm { | |||
42 | struct mutex mutex; | 42 | struct mutex mutex; |
43 | 43 | ||
44 | u32 block_size; | 44 | u32 block_size; |
45 | int heap_nodes; | ||
45 | }; | 46 | }; |
46 | 47 | ||
47 | int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block); | 48 | int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); |
48 | int nouveau_mm_fini(struct nouveau_mm **); | 49 | int nouveau_mm_fini(struct nouveau_mm *); |
49 | int nouveau_mm_pre(struct nouveau_mm *); | 50 | int nouveau_mm_pre(struct nouveau_mm *); |
50 | int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, | 51 | int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, |
51 | u32 align, struct nouveau_mm_node **); | 52 | u32 align, struct nouveau_mm_node **); |
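The signature change from struct nouveau_mm ** to struct nouveau_mm * means callers now embed the allocator rather than receive one allocated on init (and, per the nouveau_mm.c hunk above, a nonzero block requests first-time initialisation). A toy contrast of the two API shapes; the types are stand-ins, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct mm { int block_size; };

/* old shape: allocates the object and returns it via a double pointer */
static int mm_init_old(struct mm **pmm, int block)
{
	*pmm = calloc(1, sizeof(**pmm));
	if (!*pmm)
		return -1;
	(*pmm)->block_size = block;
	return 0;
}

/* new shape: caller embeds the struct; nonzero block requests (re)init */
static int mm_init_new(struct mm *mm, int block)
{
	if (block)
		mm->block_size = block;
	return 0;
}

int main(void)
{
	struct mm *heap, embedded;

	mm_init_old(&heap, 0x1000);
	mm_init_new(&embedded, 0x1000);
	printf("%d %d\n", heap->block_size, embedded.block_size);
	free(heap);
	return 0;
}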
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 159b7c437d3f..02222c540aee 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -693,6 +693,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
693 | static int | 693 | static int |
694 | nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) | 694 | nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) |
695 | { | 695 | { |
696 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
696 | struct drm_device *dev = chan->dev; | 697 | struct drm_device *dev = chan->dev; |
697 | struct nouveau_gpuobj *pgd = NULL; | 698 | struct nouveau_gpuobj *pgd = NULL; |
698 | struct nouveau_vm_pgd *vpgd; | 699 | struct nouveau_vm_pgd *vpgd; |
@@ -722,6 +723,9 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) | |||
722 | nv_wo32(chan->ramin, 0x020c, 0x000000ff); | 723 | nv_wo32(chan->ramin, 0x020c, 0x000000ff); |
723 | 724 | ||
724 | /* map display semaphore buffers into channel's vm */ | 725 | /* map display semaphore buffers into channel's vm */ |
726 | if (dev_priv->card_type >= NV_D0) | ||
727 | return 0; | ||
728 | |||
725 | for (i = 0; i < 2; i++) { | 729 | for (i = 0; i < 2; i++) { |
726 | struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; | 730 | struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; |
727 | 731 | ||
@@ -746,7 +750,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
746 | int ret, i; | 750 | int ret, i; |
747 | 751 | ||
748 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | 752 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); |
749 | if (dev_priv->card_type == NV_C0) | 753 | if (dev_priv->card_type >= NV_C0) |
750 | return nvc0_gpuobj_channel_init(chan, vm); | 754 | return nvc0_gpuobj_channel_init(chan, vm); |
751 | 755 | ||
752 | /* Allocate a chunk of memory for per-channel object storage */ | 756 | /* Allocate a chunk of memory for per-channel object storage */ |
@@ -793,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
793 | return ret; | 797 | return ret; |
794 | 798 | ||
795 | /* dma objects for display sync channel semaphore blocks */ | 799 | /* dma objects for display sync channel semaphore blocks */ |
796 | for (i = 0; i < 2; i++) { | 800 | for (i = 0; i < dev->mode_config.num_crtc; i++) { |
797 | struct nouveau_gpuobj *sem = NULL; | 801 | struct nouveau_gpuobj *sem = NULL; |
798 | struct nv50_display_crtc *dispc = | 802 | struct nv50_display_crtc *dispc = |
799 | &nv50_display(dev)->crtc[i]; | 803 | &nv50_display(dev)->crtc[i]; |
@@ -875,18 +879,18 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | |||
875 | 879 | ||
876 | NV_DEBUG(dev, "ch%d\n", chan->id); | 880 | NV_DEBUG(dev, "ch%d\n", chan->id); |
877 | 881 | ||
878 | if (dev_priv->card_type >= NV_50) { | 882 | if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) { |
879 | struct nv50_display *disp = nv50_display(dev); | 883 | struct nv50_display *disp = nv50_display(dev); |
880 | 884 | ||
881 | for (i = 0; i < 2; i++) { | 885 | for (i = 0; i < dev->mode_config.num_crtc; i++) { |
882 | struct nv50_display_crtc *dispc = &disp->crtc[i]; | 886 | struct nv50_display_crtc *dispc = &disp->crtc[i]; |
883 | nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); | 887 | nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); |
884 | } | 888 | } |
885 | |||
886 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | ||
887 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | ||
888 | } | 889 | } |
889 | 890 | ||
891 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | ||
892 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | ||
893 | |||
890 | if (drm_mm_initialized(&chan->ramin_heap)) | 894 | if (drm_mm_initialized(&chan->ramin_heap)) |
891 | drm_mm_takedown(&chan->ramin_heap); | 895 | drm_mm_takedown(&chan->ramin_heap); |
892 | nouveau_gpuobj_ref(NULL, &chan->ramin); | 896 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index ef9dec0e6f8b..9f178aa94162 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c | |||
@@ -127,13 +127,57 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P, | |||
127 | 127 | ||
128 | entry += ramcfg * recordlen; | 128 | entry += ramcfg * recordlen; |
129 | if (entry[1] >= pm->memtimings.nr_timing) { | 129 | if (entry[1] >= pm->memtimings.nr_timing) { |
130 | NV_WARN(dev, "timingset %d does not exist\n", entry[1]); | 130 | if (entry[1] != 0xff) |
131 | NV_WARN(dev, "timingset %d does not exist\n", entry[1]); | ||
131 | return NULL; | 132 | return NULL; |
132 | } | 133 | } |
133 | 134 | ||
134 | return &pm->memtimings.timing[entry[1]]; | 135 | return &pm->memtimings.timing[entry[1]]; |
135 | } | 136 | } |
136 | 137 | ||
138 | static void | ||
139 | nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P, | ||
140 | struct nouveau_pm_level *perflvl) | ||
141 | { | ||
142 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
143 | struct nvbios *bios = &dev_priv->vbios; | ||
144 | u8 *vmap; | ||
145 | int id; | ||
146 | |||
147 | id = perflvl->volt_min; | ||
148 | perflvl->volt_min = 0; | ||
149 | |||
150 | /* boards using voltage table version <0x40 store the voltage | ||
151 | * level directly in the perflvl entry as a multiple of 10mV | ||
152 | */ | ||
153 | if (dev_priv->engine.pm.voltage.version < 0x40) { | ||
154 | perflvl->volt_min = id * 10000; | ||
155 | perflvl->volt_max = perflvl->volt_min; | ||
156 | return; | ||
157 | } | ||
158 | |||
159 | /* on newer ones, the perflvl stores an index into yet another | ||
160 | * vbios table containing a min/max voltage value for the perflvl | ||
161 | */ | ||
162 | if (P->version != 2 || P->length < 34) { | ||
163 | NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n", | ||
164 | P->version, P->length); | ||
165 | return; | ||
166 | } | ||
167 | |||
168 | vmap = ROMPTR(bios, P->data[32]); | ||
169 | if (!vmap) { | ||
170 | NV_DEBUG(dev, "volt map table pointer invalid\n"); | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | if (id < vmap[3]) { | ||
175 | vmap += vmap[1] + (vmap[2] * id); | ||
176 | perflvl->volt_min = ROM32(vmap[0]); | ||
177 | perflvl->volt_max = ROM32(vmap[4]); | ||
178 | } | ||
179 | } | ||
180 | |||
137 | void | 181 | void |
138 | nouveau_perf_init(struct drm_device *dev) | 182 | nouveau_perf_init(struct drm_device *dev) |
139 | { | 183 | { |
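The new nouveau_perf_voltage() treats the perflvl byte either as a direct multiple of 10 mV (voltage tables older than 0x40) or as an index into a voltage-map table whose header bytes give entry offset, stride and count. A standalone sketch of that lookup with a fabricated table; rom32 stands in for the driver's ROM32():

#include <stdint.h>
#include <stdio.h>

/* little-endian 32-bit read, as the driver's ROM32() does */
static uint32_t rom32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* fabricated volt-map table: byte 1 = header length, byte 2 = entry
	 * stride, byte 3 = entry count; each entry is min/max in microvolts */
	uint8_t vmap[4 + 2 * 8] = { 0x40, 4, 8, 2 };
	uint32_t lo = 900000, hi = 1000000;
	const uint8_t *e;
	int id = 1, i;

	for (i = 0; i < 4; i++) {           /* fill entry 1 little-endian */
		vmap[4 + 8 + i] = lo >> (8 * i);
		vmap[4 + 8 + 4 + i] = hi >> (8 * i);
	}

	if (id < vmap[3]) {                 /* bounds check, as in the diff */
		e = vmap + vmap[1] + vmap[2] * id;  /* header + stride * index */
		printf("volt %u-%u uV\n", rom32(e), rom32(e + 4));
	}
	return 0;
}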
@@ -141,6 +185,8 @@ nouveau_perf_init(struct drm_device *dev) | |||
141 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 185 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
142 | struct nvbios *bios = &dev_priv->vbios; | 186 | struct nvbios *bios = &dev_priv->vbios; |
143 | struct bit_entry P; | 187 | struct bit_entry P; |
188 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; | ||
189 | struct nouveau_pm_tbl_header mt_hdr; | ||
144 | u8 version, headerlen, recordlen, entries; | 190 | u8 version, headerlen, recordlen, entries; |
145 | u8 *perf, *entry; | 191 | u8 *perf, *entry; |
146 | int vid, i; | 192 | int vid, i; |
@@ -188,6 +234,22 @@ nouveau_perf_init(struct drm_device *dev) | |||
188 | } | 234 | } |
189 | 235 | ||
190 | entry = perf + headerlen; | 236 | entry = perf + headerlen; |
237 | |||
238 | /* For version 0x15, initialize memtiming table */ | ||
239 | if (version == 0x15) { | ||
240 | memtimings->timing = | ||
241 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); | ||
242 | if (!memtimings->timing) { | ||
243 | NV_WARN(dev, "Could not allocate memtiming table\n"); | ||
244 | return; | ||
245 | } | ||
246 | |||
247 | mt_hdr.entry_cnt = entries; | ||
248 | mt_hdr.entry_len = 14; | ||
249 | mt_hdr.version = version; | ||
250 | mt_hdr.header_len = 4; | ||
251 | } | ||
252 | |||
191 | for (i = 0; i < entries; i++) { | 253 | for (i = 0; i < entries; i++) { |
192 | struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; | 254 | struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; |
193 | 255 | ||
@@ -203,7 +265,8 @@ nouveau_perf_init(struct drm_device *dev) | |||
203 | case 0x13: | 265 | case 0x13: |
204 | case 0x15: | 266 | case 0x15: |
205 | perflvl->fanspeed = entry[55]; | 267 | perflvl->fanspeed = entry[55]; |
206 | perflvl->voltage = (recordlen > 56) ? entry[56] : 0; | 268 | if (recordlen > 56) |
269 | perflvl->volt_min = entry[56]; | ||
207 | perflvl->core = ROM32(entry[1]) * 10; | 270 | perflvl->core = ROM32(entry[1]) * 10; |
208 | perflvl->memory = ROM32(entry[5]) * 20; | 271 | perflvl->memory = ROM32(entry[5]) * 20; |
209 | break; | 272 | break; |
@@ -211,9 +274,10 @@ nouveau_perf_init(struct drm_device *dev) | |||
211 | case 0x23: | 274 | case 0x23: |
212 | case 0x24: | 275 | case 0x24: |
213 | perflvl->fanspeed = entry[4]; | 276 | perflvl->fanspeed = entry[4]; |
214 | perflvl->voltage = entry[5]; | 277 | perflvl->volt_min = entry[5]; |
215 | perflvl->core = ROM16(entry[6]) * 1000; | 278 | perflvl->shader = ROM16(entry[6]) * 1000; |
216 | 279 | perflvl->core = perflvl->shader; | |
280 | perflvl->core += (signed char)entry[8] * 1000; | ||
217 | if (dev_priv->chipset == 0x49 || | 281 | if (dev_priv->chipset == 0x49 || |
218 | dev_priv->chipset == 0x4b) | 282 | dev_priv->chipset == 0x4b) |
219 | perflvl->memory = ROM16(entry[11]) * 1000; | 283 | perflvl->memory = ROM16(entry[11]) * 1000; |
@@ -223,7 +287,7 @@ nouveau_perf_init(struct drm_device *dev) | |||
223 | break; | 287 | break; |
224 | case 0x25: | 288 | case 0x25: |
225 | perflvl->fanspeed = entry[4]; | 289 | perflvl->fanspeed = entry[4]; |
226 | perflvl->voltage = entry[5]; | 290 | perflvl->volt_min = entry[5]; |
227 | perflvl->core = ROM16(entry[6]) * 1000; | 291 | perflvl->core = ROM16(entry[6]) * 1000; |
228 | perflvl->shader = ROM16(entry[10]) * 1000; | 292 | perflvl->shader = ROM16(entry[10]) * 1000; |
229 | perflvl->memory = ROM16(entry[12]) * 1000; | 293 | perflvl->memory = ROM16(entry[12]) * 1000; |
@@ -232,7 +296,7 @@ nouveau_perf_init(struct drm_device *dev) | |||
232 | perflvl->memscript = ROM16(entry[2]); | 296 | perflvl->memscript = ROM16(entry[2]); |
233 | case 0x35: | 297 | case 0x35: |
234 | perflvl->fanspeed = entry[6]; | 298 | perflvl->fanspeed = entry[6]; |
235 | perflvl->voltage = entry[7]; | 299 | perflvl->volt_min = entry[7]; |
236 | perflvl->core = ROM16(entry[8]) * 1000; | 300 | perflvl->core = ROM16(entry[8]) * 1000; |
237 | perflvl->shader = ROM16(entry[10]) * 1000; | 301 | perflvl->shader = ROM16(entry[10]) * 1000; |
238 | perflvl->memory = ROM16(entry[12]) * 1000; | 302 | perflvl->memory = ROM16(entry[12]) * 1000; |
@@ -240,30 +304,34 @@ nouveau_perf_init(struct drm_device *dev) | |||
240 | perflvl->unk05 = ROM16(entry[16]) * 1000; | 304 | perflvl->unk05 = ROM16(entry[16]) * 1000; |
241 | break; | 305 | break; |
242 | case 0x40: | 306 | case 0x40: |
243 | #define subent(n) entry[perf[2] + ((n) * perf[3])] | 307 | #define subent(n) ((ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000)
244 | perflvl->fanspeed = 0; /*XXX*/ | 308 | perflvl->fanspeed = 0; /*XXX*/ |
245 | perflvl->voltage = entry[2]; | 309 | perflvl->volt_min = entry[2]; |
246 | if (dev_priv->card_type == NV_50) { | 310 | if (dev_priv->card_type == NV_50) { |
247 | perflvl->core = ROM16(subent(0)) & 0xfff; | 311 | perflvl->core = subent(0); |
248 | perflvl->shader = ROM16(subent(1)) & 0xfff; | 312 | perflvl->shader = subent(1); |
249 | perflvl->memory = ROM16(subent(2)) & 0xfff; | 313 | perflvl->memory = subent(2); |
314 | perflvl->vdec = subent(3); | ||
315 | perflvl->unka0 = subent(4); | ||
250 | } else { | 316 | } else { |
251 | perflvl->shader = ROM16(subent(3)) & 0xfff; | 317 | perflvl->hub06 = subent(0); |
318 | perflvl->hub01 = subent(1); | ||
319 | perflvl->copy = subent(2); | ||
320 | perflvl->shader = subent(3); | ||
321 | perflvl->rop = subent(4); | ||
322 | perflvl->memory = subent(5); | ||
323 | perflvl->vdec = subent(6); | ||
324 | perflvl->daemon = subent(10); | ||
325 | perflvl->hub07 = subent(11); | ||
252 | perflvl->core = perflvl->shader / 2; | 326 | perflvl->core = perflvl->shader / 2; |
253 | perflvl->unk0a = ROM16(subent(4)) & 0xfff; | ||
254 | perflvl->memory = ROM16(subent(5)) & 0xfff; | ||
255 | } | 327 | } |
256 | |||
257 | perflvl->core *= 1000; | ||
258 | perflvl->shader *= 1000; | ||
259 | perflvl->memory *= 1000; | ||
260 | perflvl->unk0a *= 1000; | ||
261 | break; | 328 | break; |
262 | } | 329 | } |
263 | 330 | ||
264 | /* make sure vid is valid */ | 331 | /* make sure vid is valid */ |
265 | if (pm->voltage.supported && perflvl->voltage) { | 332 | nouveau_perf_voltage(dev, &P, perflvl); |
266 | vid = nouveau_volt_vid_lookup(dev, perflvl->voltage); | 333 | if (pm->voltage.supported && perflvl->volt_min) { |
334 | vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); | ||
267 | if (vid < 0) { | 335 | if (vid < 0) { |
268 | NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); | 336 | NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); |
269 | entry += recordlen; | 337 | entry += recordlen; |
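The reworked subent() folds the 16-bit read, the 12-bit clock mask and the kHz scaling into one place: sub-entry n sits at perf[2] + n * perf[3] inside each performance-level record. A self-contained sketch, assuming perf[2]/perf[3] are the sub-entry offset and stride as the macro implies:

#include <stdint.h>
#include <stdio.h>

static uint16_t rom16(const uint8_t *p) { return p[0] | p[1] << 8; }

/* clock n in kHz: 16-bit LE word at (first sub-entry offset) + n * stride,
 * low 12 bits are the clock in MHz, matching the diff's subent() macro */
static uint32_t subent(const uint8_t *entry, const uint8_t *perf, int n)
{
	return (rom16(&entry[perf[2] + n * perf[3]]) & 0xfff) * 1000;
}

int main(void)
{
	uint8_t perf[4] = { 0x40, 0, 10, 2 };   /* sub-entries at +10, stride 2 */
	uint8_t entry[32] = { 0 };

	entry[10] = 0x8e; entry[11] = 0x12;     /* word 0x128e -> 0x28e = 654 */
	printf("core %u kHz\n", subent(entry, perf, 0));
	return 0;
}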
@@ -272,7 +340,11 @@ nouveau_perf_init(struct drm_device *dev) | |||
272 | } | 340 | } |
273 | 341 | ||
274 | /* get the corresponding memory timings */ | 342 | /* get the corresponding memory timings */ |
275 | if (version > 0x15) { | 343 | if (version == 0x15) { |
344 | memtimings->timing[i].id = i; | ||
345 | nv30_mem_timing_entry(dev, &mt_hdr, (struct nouveau_pm_tbl_entry *)&entry[41], 0, &memtimings->timing[i]); | ||
346 | perflvl->timing = &memtimings->timing[i]; | ||
347 | } else if (version > 0x15) { | ||
276 | /* last 3 args are for < 0x40, ignored for >= 0x40 */ | 348 | /* last 3 args are for < 0x40, ignored for >= 0x40 */ |
277 | perflvl->timing = | 349 | perflvl->timing = |
278 | nouveau_perf_timing(dev, &P, | 350 | nouveau_perf_timing(dev, &P, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index da8d994d5e8a..a539fd257921 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -64,18 +64,26 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
64 | if (perflvl == pm->cur) | 64 | if (perflvl == pm->cur) |
65 | return 0; | 65 | return 0; |
66 | 66 | ||
67 | if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) { | 67 | if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) { |
68 | ret = pm->voltage_set(dev, perflvl->voltage); | 68 | ret = pm->voltage_set(dev, perflvl->volt_min); |
69 | if (ret) { | 69 | if (ret) { |
70 | NV_ERROR(dev, "voltage_set %d failed: %d\n", | 70 | NV_ERROR(dev, "voltage_set %d failed: %d\n", |
71 | perflvl->voltage, ret); | 71 | perflvl->volt_min, ret); |
72 | } | 72 | } |
73 | } | 73 | } |
74 | 74 | ||
75 | nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core); | 75 | if (pm->clocks_pre) { |
76 | nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader); | 76 | void *state = pm->clocks_pre(dev, perflvl); |
77 | nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory); | 77 | if (IS_ERR(state)) |
78 | nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05); | 78 | return PTR_ERR(state); |
79 | pm->clocks_set(dev, state); | ||
80 | } else | ||
81 | if (pm->clock_set) { | ||
82 | nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core); | ||
83 | nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader); | ||
84 | nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory); | ||
85 | nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05); | ||
86 | } | ||
79 | 87 | ||
80 | pm->cur = perflvl; | 88 | pm->cur = perflvl; |
81 | return 0; | 89 | return 0; |
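perflvl_set now prefers a two-phase interface: clocks_pre() validates and returns opaque state, encoding failures in the pointer (IS_ERR/PTR_ERR), and clocks_set() commits without a failure path. A user-space sketch of that pointer-encoded-errno idiom; err_ptr/is_err mimic the kernel's <linux/err.h>:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err) { return (void *)err; }
static long ptr_err(const void *p) { return (long)p; }
static int is_err(const void *p) { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

struct clk_state { int core_khz; };

/* phase 1: validate and precompute; may fail without touching hardware */
static void *clocks_pre(int core_khz)
{
	struct clk_state *s;

	if (core_khz <= 0)
		return err_ptr(-22);        /* -EINVAL */
	s = malloc(sizeof(*s));
	if (!s)
		return err_ptr(-12);        /* -ENOMEM */
	s->core_khz = core_khz;
	return s;
}

/* phase 2: commit; by construction this cannot fail */
static void clocks_set(void *state)
{
	struct clk_state *s = state;

	printf("core -> %d kHz\n", s->core_khz);
	free(s);
}

int main(void)
{
	void *state = clocks_pre(405000);

	if (is_err(state))
		return (int)-ptr_err(state);
	clocks_set(state);
	return 0;
}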
@@ -92,9 +100,6 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile) | |||
92 | if (nouveau_perflvl_wr != 7777) | 100 | if (nouveau_perflvl_wr != 7777) |
93 | return -EPERM; | 101 | return -EPERM; |
94 | 102 | ||
95 | if (!pm->clock_set) | ||
96 | return -EINVAL; | ||
97 | |||
98 | if (!strncmp(profile, "boot", 4)) | 103 | if (!strncmp(profile, "boot", 4)) |
99 | perflvl = &pm->boot; | 104 | perflvl = &pm->boot; |
100 | else { | 105 | else { |
@@ -123,31 +128,37 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
123 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 128 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
124 | int ret; | 129 | int ret; |
125 | 130 | ||
126 | if (!pm->clock_get) | ||
127 | return -EINVAL; | ||
128 | |||
129 | memset(perflvl, 0, sizeof(*perflvl)); | 131 | memset(perflvl, 0, sizeof(*perflvl)); |
130 | 132 | ||
131 | ret = pm->clock_get(dev, PLL_CORE); | 133 | if (pm->clocks_get) { |
132 | if (ret > 0) | 134 | ret = pm->clocks_get(dev, perflvl); |
133 | perflvl->core = ret; | 135 | if (ret) |
136 | return ret; | ||
137 | } else | ||
138 | if (pm->clock_get) { | ||
139 | ret = pm->clock_get(dev, PLL_CORE); | ||
140 | if (ret > 0) | ||
141 | perflvl->core = ret; | ||
134 | 142 | ||
135 | ret = pm->clock_get(dev, PLL_MEMORY); | 143 | ret = pm->clock_get(dev, PLL_MEMORY); |
136 | if (ret > 0) | 144 | if (ret > 0) |
137 | perflvl->memory = ret; | 145 | perflvl->memory = ret; |
138 | 146 | ||
139 | ret = pm->clock_get(dev, PLL_SHADER); | 147 | ret = pm->clock_get(dev, PLL_SHADER); |
140 | if (ret > 0) | 148 | if (ret > 0) |
141 | perflvl->shader = ret; | 149 | perflvl->shader = ret; |
142 | 150 | ||
143 | ret = pm->clock_get(dev, PLL_UNK05); | 151 | ret = pm->clock_get(dev, PLL_UNK05); |
144 | if (ret > 0) | 152 | if (ret > 0) |
145 | perflvl->unk05 = ret; | 153 | perflvl->unk05 = ret; |
154 | } | ||
146 | 155 | ||
147 | if (pm->voltage.supported && pm->voltage_get) { | 156 | if (pm->voltage.supported && pm->voltage_get) { |
148 | ret = pm->voltage_get(dev); | 157 | ret = pm->voltage_get(dev); |
149 | if (ret > 0) | 158 | if (ret > 0) { |
150 | perflvl->voltage = ret; | 159 | perflvl->volt_min = ret; |
160 | perflvl->volt_max = ret; | ||
161 | } | ||
151 | } | 162 | } |
152 | 163 | ||
153 | return 0; | 164 | return 0; |
@@ -156,7 +167,7 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
156 | static void | 167 | static void |
157 | nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) | 168 | nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) |
158 | { | 169 | { |
159 | char c[16], s[16], v[16], f[16], t[16]; | 170 | char c[16], s[16], v[32], f[16], t[16], m[16]; |
160 | 171 | ||
161 | c[0] = '\0'; | 172 | c[0] = '\0'; |
162 | if (perflvl->core) | 173 | if (perflvl->core) |
@@ -166,9 +177,19 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) | |||
166 | if (perflvl->shader) | 177 | if (perflvl->shader) |
167 | snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); | 178 | snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); |
168 | 179 | ||
180 | m[0] = '\0'; | ||
181 | if (perflvl->memory) | ||
182 | snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000); | ||
183 | |||
169 | v[0] = '\0'; | 184 | v[0] = '\0'; |
170 | if (perflvl->voltage) | 185 | if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) { |
171 | snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10); | 186 | snprintf(v, sizeof(v), " voltage %dmV-%dmV", |
187 | perflvl->volt_min / 1000, perflvl->volt_max / 1000); | ||
188 | } else | ||
189 | if (perflvl->volt_min) { | ||
190 | snprintf(v, sizeof(v), " voltage %dmV", | ||
191 | perflvl->volt_min / 1000); | ||
192 | } | ||
172 | 193 | ||
173 | f[0] = '\0'; | 194 | f[0] = '\0'; |
174 | if (perflvl->fanspeed) | 195 | if (perflvl->fanspeed) |
@@ -178,8 +199,7 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) | |||
178 | if (perflvl->timing) | 199 | if (perflvl->timing) |
179 | snprintf(t, sizeof(t), " timing %d", perflvl->timing->id); | 200 | snprintf(t, sizeof(t), " timing %d", perflvl->timing->id); |
180 | 201 | ||
181 | snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000, | 202 | snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f); |
182 | c, s, v, f, t); | ||
183 | } | 203 | } |
184 | 204 | ||
185 | static ssize_t | 205 | static ssize_t |
@@ -190,7 +210,7 @@ nouveau_pm_get_perflvl_info(struct device *d, | |||
190 | char *ptr = buf; | 210 | char *ptr = buf; |
191 | int len = PAGE_SIZE; | 211 | int len = PAGE_SIZE; |
192 | 212 | ||
193 | snprintf(ptr, len, "%d: ", perflvl->id); | 213 | snprintf(ptr, len, "%d:", perflvl->id); |
194 | ptr += strlen(buf); | 214 | ptr += strlen(buf); |
195 | len -= strlen(buf); | 215 | len -= strlen(buf); |
196 | 216 | ||
@@ -211,9 +231,9 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) | |||
211 | if (!pm->cur) | 231 | if (!pm->cur) |
212 | snprintf(ptr, len, "setting: boot\n"); | 232 | snprintf(ptr, len, "setting: boot\n"); |
213 | else if (pm->cur == &pm->boot) | 233 | else if (pm->cur == &pm->boot) |
214 | snprintf(ptr, len, "setting: boot\nc: "); | 234 | snprintf(ptr, len, "setting: boot\nc:"); |
215 | else | 235 | else |
216 | snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id); | 236 | snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id); |
217 | ptr += strlen(buf); | 237 | ptr += strlen(buf); |
218 | len -= strlen(buf); | 238 | len -= strlen(buf); |
219 | 239 | ||
@@ -292,7 +312,7 @@ nouveau_sysfs_fini(struct drm_device *dev) | |||
292 | } | 312 | } |
293 | } | 313 | } |
294 | 314 | ||
295 | #ifdef CONFIG_HWMON | 315 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) |
296 | static ssize_t | 316 | static ssize_t |
297 | nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) | 317 | nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) |
298 | { | 318 | { |
@@ -409,7 +429,7 @@ static const struct attribute_group hwmon_attrgroup = { | |||
409 | static int | 429 | static int |
410 | nouveau_hwmon_init(struct drm_device *dev) | 430 | nouveau_hwmon_init(struct drm_device *dev) |
411 | { | 431 | { |
412 | #ifdef CONFIG_HWMON | 432 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) |
413 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 433 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
414 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 434 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
415 | struct device *hwmon_dev; | 435 | struct device *hwmon_dev; |
@@ -442,7 +462,7 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
442 | static void | 462 | static void |
443 | nouveau_hwmon_fini(struct drm_device *dev) | 463 | nouveau_hwmon_fini(struct drm_device *dev) |
444 | { | 464 | { |
445 | #ifdef CONFIG_HWMON | 465 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) |
446 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 466 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
447 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 467 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
448 | 468 | ||
@@ -488,7 +508,7 @@ nouveau_pm_init(struct drm_device *dev) | |||
488 | NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); | 508 | NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); |
489 | for (i = 0; i < pm->nr_perflvl; i++) { | 509 | for (i = 0; i < pm->nr_perflvl; i++) { |
490 | nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); | 510 | nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); |
491 | NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info); | 511 | NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); |
492 | } | 512 | } |
493 | 513 | ||
494 | /* determine current ("boot") performance level */ | 514 | /* determine current ("boot") performance level */ |
@@ -498,7 +518,7 @@ nouveau_pm_init(struct drm_device *dev) | |||
498 | pm->cur = &pm->boot; | 518 | pm->cur = &pm->boot; |
499 | 519 | ||
500 | nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); | 520 | nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); |
501 | NV_INFO(dev, "c: %s", info); | 521 | NV_INFO(dev, "c:%s", info); |
502 | } | 522 | } |
503 | 523 | ||
504 | /* switch performance levels now if requested */ | 524 | /* switch performance levels now if requested */ |
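The widened guard handles CONFIG_HWMON=m: for a tristate symbol only CONFIG_HWMON_MODULE is defined when hwmon is modular, and modular hwmon is only reachable when nouveau itself is a module, so a bare #ifdef CONFIG_HWMON would wrongly compile the hooks out. A reduced illustration (the CONFIG_* macros are supplied on the compiler command line); later kernels wrap this exact test as IS_REACHABLE():

#include <stdio.h>

/* Build with e.g. -DMODULE -DCONFIG_HWMON_MODULE to emulate nouveau
 * built as a module against a modular hwmon. */
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
#define HAVE_HWMON 1
#else
#define HAVE_HWMON 0
#endif

int main(void)
{
	printf("hwmon hooks %s\n", HAVE_HWMON ? "built" : "stubbed");
	return 0;
}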
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index 4a9838ddacec..8ac02cdd03a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h | |||
@@ -52,6 +52,11 @@ void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, | |||
52 | u32 id, int khz); | 52 | u32 id, int khz); |
53 | void nv04_pm_clock_set(struct drm_device *, void *); | 53 | void nv04_pm_clock_set(struct drm_device *, void *); |
54 | 54 | ||
55 | /* nv40_pm.c */ | ||
56 | int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); | ||
57 | void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); | ||
58 | void nv40_pm_clocks_set(struct drm_device *, void *); | ||
59 | |||
55 | /* nv50_pm.c */ | 60 | /* nv50_pm.c */ |
56 | int nv50_pm_clock_get(struct drm_device *, u32 id); | 61 | int nv50_pm_clock_get(struct drm_device *, u32 id); |
57 | void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, | 62 | void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, |
@@ -59,10 +64,12 @@ void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, | |||
59 | void nv50_pm_clock_set(struct drm_device *, void *); | 64 | void nv50_pm_clock_set(struct drm_device *, void *); |
60 | 65 | ||
61 | /* nva3_pm.c */ | 66 | /* nva3_pm.c */ |
62 | int nva3_pm_clock_get(struct drm_device *, u32 id); | 67 | int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); |
63 | void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, | 68 | void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); |
64 | u32 id, int khz); | 69 | void nva3_pm_clocks_set(struct drm_device *, void *); |
65 | void nva3_pm_clock_set(struct drm_device *, void *); | 70 | |
71 | /* nvc0_pm.c */ | ||
72 | int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); | ||
66 | 73 | ||
67 | /* nouveau_temp.c */ | 74 | /* nouveau_temp.c */ |
68 | void nouveau_temp_init(struct drm_device *dev); | 75 | void nouveau_temp_init(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index f18cdfc3400f..43a96b99e180 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h | |||
@@ -826,9 +826,12 @@ | |||
826 | #define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000 | 826 | #define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000 |
827 | #define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000 | 827 | #define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000 |
828 | #define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000 | 828 | #define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000 |
829 | #define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084 | 829 | #define NV50_PDISP_SOR_PWM_DIV(i) (0x0061c080 + (i) * 0x800) |
830 | #define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 | 830 | #define NV50_PDISP_SOR_PWM_CTL(i) (0x0061c084 + (i) * 0x800) |
831 | #define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff | 831 | #define NV50_PDISP_SOR_PWM_CTL_NEW 0x80000000 |
832 | #define NVA3_PDISP_SOR_PWM_CTL_UNK 0x40000000 | ||
833 | #define NV50_PDISP_SOR_PWM_CTL_VAL 0x000007ff | ||
834 | #define NVA3_PDISP_SOR_PWM_CTL_VAL 0x00ffffff | ||
832 | #define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) | 835 | #define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) |
833 | #define NV50_SOR_DP_CTRL_ENABLED 0x00000001 | 836 | #define NV50_SOR_DP_CTRL_ENABLED 0x00000001 |
834 | #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 | 837 | #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 |
@@ -843,7 +846,7 @@ | |||
843 | #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 | 846 | #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 |
844 | #define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) | 847 | #define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) |
845 | #define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) | 848 | #define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) |
846 | #define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) | 849 | #define NV50_SOR_DP_SCFG(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) |
847 | #define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) | 850 | #define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) |
848 | 851 | ||
849 | #define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) | 852 | #define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) |
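The renamed macros give each SOR its own PWM register pair at a 0x800 stride, so NV50_PDISP_SOR_PWM_CTL(0) is 0x61c084 and NV50_PDISP_SOR_PWM_CTL(1) is 0x61c884. A hedged sketch of programming a backlight duty cycle with them (not the driver's actual backlight path; the 24-bit NVA3 value mask is assumed):

    static void
    sor_pwm_set(struct drm_device *dev, int or, u32 duty)
    {
            u32 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));

            if (duty > div)
                    duty = div;     /* duty cycle cannot exceed the divider */
            nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or),
                    NV50_PDISP_SOR_PWM_CTL_NEW |
                    (duty & NVA3_PDISP_SOR_PWM_CTL_VAL));
    }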
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 2706cb3d871a..b75258a9fe44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -12,8 +12,8 @@ struct nouveau_sgdma_be { | |||
12 | struct drm_device *dev; | 12 | struct drm_device *dev; |
13 | 13 | ||
14 | dma_addr_t *pages; | 14 | dma_addr_t *pages; |
15 | bool *ttm_alloced; | ||
16 | unsigned nr_pages; | 15 | unsigned nr_pages; |
16 | bool unmap_pages; | ||
17 | 17 | ||
18 | u64 offset; | 18 | u64 offset; |
19 | bool bound; | 19 | bool bound; |
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
26 | { | 26 | { |
27 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | 27 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; |
28 | struct drm_device *dev = nvbe->dev; | 28 | struct drm_device *dev = nvbe->dev; |
29 | int i; | ||
29 | 30 | ||
30 | NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages); | 31 | NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages); |
31 | 32 | ||
32 | if (nvbe->pages) | 33 | nvbe->pages = dma_addrs; |
33 | return -EINVAL; | 34 | nvbe->nr_pages = num_pages; |
34 | 35 | nvbe->unmap_pages = true; | |
35 | nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL); | ||
36 | if (!nvbe->pages) | ||
37 | return -ENOMEM; | ||
38 | 36 | ||
39 | nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); | 37 | /* this code path isn't called and is incorrect anyways */ |
40 | if (!nvbe->ttm_alloced) { | 38 | if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */ |
41 | kfree(nvbe->pages); | 39 | nvbe->unmap_pages = false; |
42 | nvbe->pages = NULL; | 40 | return 0; |
43 | return -ENOMEM; | ||
44 | } | 41 | } |
45 | 42 | ||
46 | nvbe->nr_pages = 0; | 43 | for (i = 0; i < num_pages; i++) { |
47 | while (num_pages--) { | 44 | nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0, |
48 | /* this code path isn't called and is incorrect anyways */ | 45 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
49 | if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ | 46 | if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) { |
50 | nvbe->pages[nvbe->nr_pages] = | 47 | nvbe->nr_pages = --i; |
51 | dma_addrs[nvbe->nr_pages]; | 48 | be->func->clear(be); |
52 | nvbe->ttm_alloced[nvbe->nr_pages] = true; | 49 | return -EFAULT; |
53 | } else { | ||
54 | nvbe->pages[nvbe->nr_pages] = | ||
55 | pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0, | ||
56 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
57 | if (pci_dma_mapping_error(dev->pdev, | ||
58 | nvbe->pages[nvbe->nr_pages])) { | ||
59 | be->func->clear(be); | ||
60 | return -EFAULT; | ||
61 | } | ||
62 | nvbe->ttm_alloced[nvbe->nr_pages] = false; | ||
63 | } | 50 | } |
64 | |||
65 | nvbe->nr_pages++; | ||
66 | } | 51 | } |
67 | 52 | ||
68 | return 0; | 53 | return 0; |
@@ -72,25 +57,16 @@ static void | |||
72 | nouveau_sgdma_clear(struct ttm_backend *be) | 57 | nouveau_sgdma_clear(struct ttm_backend *be) |
73 | { | 58 | { |
74 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | 59 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; |
75 | struct drm_device *dev; | 60 | struct drm_device *dev = nvbe->dev; |
76 | |||
77 | if (nvbe && nvbe->pages) { | ||
78 | dev = nvbe->dev; | ||
79 | NV_DEBUG(dev, "\n"); | ||
80 | 61 | ||
81 | if (nvbe->bound) | 62 | if (nvbe->bound) |
82 | be->func->unbind(be); | 63 | be->func->unbind(be); |
83 | 64 | ||
65 | if (nvbe->unmap_pages) { | ||
84 | while (nvbe->nr_pages--) { | 66 | while (nvbe->nr_pages--) { |
85 | if (!nvbe->ttm_alloced[nvbe->nr_pages]) | 67 | pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], |
86 | pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], | ||
87 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 68 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
88 | } | 69 | } |
89 | kfree(nvbe->pages); | ||
90 | kfree(nvbe->ttm_alloced); | ||
91 | nvbe->pages = NULL; | ||
92 | nvbe->ttm_alloced = NULL; | ||
93 | nvbe->nr_pages = 0; | ||
94 | } | 70 | } |
95 | } | 71 | } |
96 | 72 | ||
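The rewrite above replaces the per-page ttm_alloced[] bookkeeping with a single unmap_pages flag and unwinds partial failures by trimming nr_pages before calling clear(). The idiom in isolation, as a sketch (helper name hypothetical; when entry i fails, exactly pages [0, i) hold live mappings):

    static int
    map_all(struct ttm_backend *be, struct nouveau_sgdma_be *nvbe,
            struct drm_device *dev, struct page **pages, unsigned num_pages)
    {
            unsigned i;

            for (i = 0; i < num_pages; i++) {
                    nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
                                                  PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                    if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
                            nvbe->nr_pages = i;     /* count of mapped pages */
                            be->func->clear(be);    /* unbinds, unmaps those */
                            return -EFAULT;
                    }
            }
            return 0;
    }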
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 10656e430b44..82478e0998e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -286,9 +286,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
286 | engine->gpio.get = nv10_gpio_get; | 286 | engine->gpio.get = nv10_gpio_get; |
287 | engine->gpio.set = nv10_gpio_set; | 287 | engine->gpio.set = nv10_gpio_set; |
288 | engine->gpio.irq_enable = NULL; | 288 | engine->gpio.irq_enable = NULL; |
289 | engine->pm.clock_get = nv04_pm_clock_get; | 289 | engine->pm.clocks_get = nv40_pm_clocks_get; |
290 | engine->pm.clock_pre = nv04_pm_clock_pre; | 290 | engine->pm.clocks_pre = nv40_pm_clocks_pre; |
291 | engine->pm.clock_set = nv04_pm_clock_set; | 291 | engine->pm.clocks_set = nv40_pm_clocks_set; |
292 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 292 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
293 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 293 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
294 | engine->pm.temp_get = nv40_temp_get; | 294 | engine->pm.temp_get = nv40_temp_get; |
@@ -299,7 +299,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
299 | case 0x50: | 299 | case 0x50: |
300 | case 0x80: /* gotta love NVIDIA's consistency.. */ | 300 | case 0x80: /* gotta love NVIDIA's consistency.. */ |
301 | case 0x90: | 301 | case 0x90: |
302 | case 0xA0: | 302 | case 0xa0: |
303 | engine->instmem.init = nv50_instmem_init; | 303 | engine->instmem.init = nv50_instmem_init; |
304 | engine->instmem.takedown = nv50_instmem_takedown; | 304 | engine->instmem.takedown = nv50_instmem_takedown; |
305 | engine->instmem.suspend = nv50_instmem_suspend; | 305 | engine->instmem.suspend = nv50_instmem_suspend; |
@@ -359,9 +359,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
359 | engine->pm.clock_set = nv50_pm_clock_set; | 359 | engine->pm.clock_set = nv50_pm_clock_set; |
360 | break; | 360 | break; |
361 | default: | 361 | default: |
362 | engine->pm.clock_get = nva3_pm_clock_get; | 362 | engine->pm.clocks_get = nva3_pm_clocks_get; |
363 | engine->pm.clock_pre = nva3_pm_clock_pre; | 363 | engine->pm.clocks_pre = nva3_pm_clocks_pre; |
364 | engine->pm.clock_set = nva3_pm_clock_set; | 364 | engine->pm.clocks_set = nva3_pm_clocks_set; |
365 | break; | 365 | break; |
366 | } | 366 | } |
367 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 367 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
@@ -376,7 +376,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
376 | engine->vram.put = nv50_vram_del; | 376 | engine->vram.put = nv50_vram_del; |
377 | engine->vram.flags_valid = nv50_vram_flags_valid; | 377 | engine->vram.flags_valid = nv50_vram_flags_valid; |
378 | break; | 378 | break; |
379 | case 0xC0: | 379 | case 0xc0: |
380 | engine->instmem.init = nvc0_instmem_init; | 380 | engine->instmem.init = nvc0_instmem_init; |
381 | engine->instmem.takedown = nvc0_instmem_takedown; | 381 | engine->instmem.takedown = nvc0_instmem_takedown; |
382 | engine->instmem.suspend = nvc0_instmem_suspend; | 382 | engine->instmem.suspend = nvc0_instmem_suspend; |
@@ -422,12 +422,73 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
422 | engine->vram.put = nv50_vram_del; | 422 | engine->vram.put = nv50_vram_del; |
423 | engine->vram.flags_valid = nvc0_vram_flags_valid; | 423 | engine->vram.flags_valid = nvc0_vram_flags_valid; |
424 | engine->pm.temp_get = nv84_temp_get; | 424 | engine->pm.temp_get = nv84_temp_get; |
425 | engine->pm.clocks_get = nvc0_pm_clocks_get; | ||
426 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | ||
427 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | ||
428 | break; | ||
429 | case 0xd0: | ||
430 | engine->instmem.init = nvc0_instmem_init; | ||
431 | engine->instmem.takedown = nvc0_instmem_takedown; | ||
432 | engine->instmem.suspend = nvc0_instmem_suspend; | ||
433 | engine->instmem.resume = nvc0_instmem_resume; | ||
434 | engine->instmem.get = nv50_instmem_get; | ||
435 | engine->instmem.put = nv50_instmem_put; | ||
436 | engine->instmem.map = nv50_instmem_map; | ||
437 | engine->instmem.unmap = nv50_instmem_unmap; | ||
438 | engine->instmem.flush = nv84_instmem_flush; | ||
439 | engine->mc.init = nv50_mc_init; | ||
440 | engine->mc.takedown = nv50_mc_takedown; | ||
441 | engine->timer.init = nv04_timer_init; | ||
442 | engine->timer.read = nv04_timer_read; | ||
443 | engine->timer.takedown = nv04_timer_takedown; | ||
444 | engine->fb.init = nvc0_fb_init; | ||
445 | engine->fb.takedown = nvc0_fb_takedown; | ||
446 | engine->fifo.channels = 128; | ||
447 | engine->fifo.init = nvc0_fifo_init; | ||
448 | engine->fifo.takedown = nvc0_fifo_takedown; | ||
449 | engine->fifo.disable = nvc0_fifo_disable; | ||
450 | engine->fifo.enable = nvc0_fifo_enable; | ||
451 | engine->fifo.reassign = nvc0_fifo_reassign; | ||
452 | engine->fifo.channel_id = nvc0_fifo_channel_id; | ||
453 | engine->fifo.create_context = nvc0_fifo_create_context; | ||
454 | engine->fifo.destroy_context = nvc0_fifo_destroy_context; | ||
455 | engine->fifo.load_context = nvc0_fifo_load_context; | ||
456 | engine->fifo.unload_context = nvc0_fifo_unload_context; | ||
457 | engine->display.early_init = nouveau_stub_init; | ||
458 | engine->display.late_takedown = nouveau_stub_takedown; | ||
459 | engine->display.create = nvd0_display_create; | ||
460 | engine->display.init = nvd0_display_init; | ||
461 | engine->display.destroy = nvd0_display_destroy; | ||
462 | engine->gpio.init = nv50_gpio_init; | ||
463 | engine->gpio.takedown = nouveau_stub_takedown; | ||
464 | engine->gpio.get = nvd0_gpio_get; | ||
465 | engine->gpio.set = nvd0_gpio_set; | ||
466 | engine->gpio.irq_register = nv50_gpio_irq_register; | ||
467 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; | ||
468 | engine->gpio.irq_enable = nv50_gpio_irq_enable; | ||
469 | engine->vram.init = nvc0_vram_init; | ||
470 | engine->vram.takedown = nv50_vram_fini; | ||
471 | engine->vram.get = nvc0_vram_new; | ||
472 | engine->vram.put = nv50_vram_del; | ||
473 | engine->vram.flags_valid = nvc0_vram_flags_valid; | ||
474 | engine->pm.clocks_get = nvc0_pm_clocks_get; | ||
475 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | ||
476 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | ||
425 | break; | 477 | break; |
426 | default: | 478 | default: |
427 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); | 479 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); |
428 | return 1; | 480 | return 1; |
429 | } | 481 | } |
430 | 482 | ||
483 | /* headless mode */ | ||
484 | if (nouveau_modeset == 2) { | ||
485 | engine->display.early_init = nouveau_stub_init; | ||
486 | engine->display.late_takedown = nouveau_stub_takedown; | ||
487 | engine->display.create = nouveau_stub_init; | ||
488 | engine->display.init = nouveau_stub_init; | ||
489 | engine->display.destroy = nouveau_stub_takedown; | ||
490 | } | ||
491 | |||
431 | return 0; | 492 | return 0; |
432 | } | 493 | } |
433 | 494 | ||
@@ -449,21 +510,6 @@ nouveau_vga_set_decode(void *priv, bool state) | |||
449 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 510 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
450 | } | 511 | } |
451 | 512 | ||
452 | static int | ||
453 | nouveau_card_init_channel(struct drm_device *dev) | ||
454 | { | ||
455 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
456 | int ret; | ||
457 | |||
458 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, | ||
459 | NvDmaFB, NvDmaTT); | ||
460 | if (ret) | ||
461 | return ret; | ||
462 | |||
463 | mutex_unlock(&dev_priv->channel->mutex); | ||
464 | return 0; | ||
465 | } | ||
466 | |||
467 | static void nouveau_switcheroo_set_state(struct pci_dev *pdev, | 513 | static void nouveau_switcheroo_set_state(struct pci_dev *pdev, |
468 | enum vga_switcheroo_state state) | 514 | enum vga_switcheroo_state state) |
469 | { | 515 | { |
@@ -630,8 +676,11 @@ nouveau_card_init(struct drm_device *dev) | |||
630 | break; | 676 | break; |
631 | } | 677 | } |
632 | 678 | ||
633 | if (dev_priv->card_type == NV_40) | 679 | if (dev_priv->card_type == NV_40 || |
634 | nv40_mpeg_create(dev); | 680 | dev_priv->chipset == 0x31 || |
681 | dev_priv->chipset == 0x34 || | ||
682 | dev_priv->chipset == 0x36) | ||
683 | nv31_mpeg_create(dev); | ||
635 | else | 684 | else |
636 | if (dev_priv->card_type == NV_50 && | 685 | if (dev_priv->card_type == NV_50 && |
637 | (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) | 686 | (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) |
@@ -651,41 +700,69 @@ nouveau_card_init(struct drm_device *dev) | |||
651 | goto out_engine; | 700 | goto out_engine; |
652 | } | 701 | } |
653 | 702 | ||
654 | ret = engine->display.create(dev); | 703 | ret = nouveau_irq_init(dev); |
655 | if (ret) | 704 | if (ret) |
656 | goto out_fifo; | 705 | goto out_fifo; |
657 | 706 | ||
658 | ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1); | 707 | /* initialise general modesetting */ |
659 | if (ret) | 708 | drm_mode_config_init(dev); |
660 | goto out_vblank; | 709 | drm_mode_create_scaling_mode_property(dev); |
710 | drm_mode_create_dithering_property(dev); | ||
711 | dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; | ||
712 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1); | ||
713 | dev->mode_config.min_width = 0; | ||
714 | dev->mode_config.min_height = 0; | ||
715 | if (dev_priv->card_type < NV_10) { | ||
716 | dev->mode_config.max_width = 2048; | ||
717 | dev->mode_config.max_height = 2048; | ||
718 | } else | ||
719 | if (dev_priv->card_type < NV_50) { | ||
720 | dev->mode_config.max_width = 4096; | ||
721 | dev->mode_config.max_height = 4096; | ||
722 | } else { | ||
723 | dev->mode_config.max_width = 8192; | ||
724 | dev->mode_config.max_height = 8192; | ||
725 | } | ||
661 | 726 | ||
662 | ret = nouveau_irq_init(dev); | 727 | ret = engine->display.create(dev); |
663 | if (ret) | 728 | if (ret) |
664 | goto out_vblank; | 729 | goto out_irq; |
665 | 730 | ||
666 | /* what about PVIDEO/PCRTC/PRAMDAC etc? */ | 731 | nouveau_backlight_init(dev); |
667 | 732 | ||
668 | if (dev_priv->eng[NVOBJ_ENGINE_GR]) { | 733 | if (dev_priv->eng[NVOBJ_ENGINE_GR]) { |
669 | ret = nouveau_fence_init(dev); | 734 | ret = nouveau_fence_init(dev); |
670 | if (ret) | 735 | if (ret) |
671 | goto out_irq; | 736 | goto out_disp; |
672 | 737 | ||
673 | ret = nouveau_card_init_channel(dev); | 738 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, |
739 | NvDmaFB, NvDmaTT); | ||
674 | if (ret) | 740 | if (ret) |
675 | goto out_fence; | 741 | goto out_fence; |
742 | |||
743 | mutex_unlock(&dev_priv->channel->mutex); | ||
744 | } | ||
745 | |||
746 | if (dev->mode_config.num_crtc) { | ||
747 | ret = drm_vblank_init(dev, dev->mode_config.num_crtc); | ||
748 | if (ret) | ||
749 | goto out_chan; | ||
750 | |||
751 | nouveau_fbcon_init(dev); | ||
752 | drm_kms_helper_poll_init(dev); | ||
676 | } | 753 | } |
677 | 754 | ||
678 | nouveau_fbcon_init(dev); | ||
679 | drm_kms_helper_poll_init(dev); | ||
680 | return 0; | 755 | return 0; |
681 | 756 | ||
757 | out_chan: | ||
758 | nouveau_channel_put_unlocked(&dev_priv->channel); | ||
682 | out_fence: | 759 | out_fence: |
683 | nouveau_fence_fini(dev); | 760 | nouveau_fence_fini(dev); |
761 | out_disp: | ||
762 | nouveau_backlight_exit(dev); | ||
763 | engine->display.destroy(dev); | ||
684 | out_irq: | 764 | out_irq: |
685 | nouveau_irq_fini(dev); | 765 | nouveau_irq_fini(dev); |
686 | out_vblank: | ||
687 | drm_vblank_cleanup(dev); | ||
688 | engine->display.destroy(dev); | ||
689 | out_fifo: | 766 | out_fifo: |
690 | if (!dev_priv->noaccel) | 767 | if (!dev_priv->noaccel) |
691 | engine->fifo.takedown(dev); | 768 | engine->fifo.takedown(dev); |
@@ -732,15 +809,20 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
732 | struct nouveau_engine *engine = &dev_priv->engine; | 809 | struct nouveau_engine *engine = &dev_priv->engine; |
733 | int e; | 810 | int e; |
734 | 811 | ||
735 | drm_kms_helper_poll_fini(dev); | 812 | if (dev->mode_config.num_crtc) { |
736 | nouveau_fbcon_fini(dev); | 813 | drm_kms_helper_poll_fini(dev); |
814 | nouveau_fbcon_fini(dev); | ||
815 | drm_vblank_cleanup(dev); | ||
816 | } | ||
737 | 817 | ||
738 | if (dev_priv->channel) { | 818 | if (dev_priv->channel) { |
739 | nouveau_channel_put_unlocked(&dev_priv->channel); | 819 | nouveau_channel_put_unlocked(&dev_priv->channel); |
740 | nouveau_fence_fini(dev); | 820 | nouveau_fence_fini(dev); |
741 | } | 821 | } |
742 | 822 | ||
823 | nouveau_backlight_exit(dev); | ||
743 | engine->display.destroy(dev); | 824 | engine->display.destroy(dev); |
825 | drm_mode_config_cleanup(dev); | ||
744 | 826 | ||
745 | if (!dev_priv->noaccel) { | 827 | if (!dev_priv->noaccel) { |
746 | engine->fifo.takedown(dev); | 828 | engine->fifo.takedown(dev); |
@@ -774,7 +856,6 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
774 | engine->vram.takedown(dev); | 856 | engine->vram.takedown(dev); |
775 | 857 | ||
776 | nouveau_irq_fini(dev); | 858 | nouveau_irq_fini(dev); |
777 | drm_vblank_cleanup(dev); | ||
778 | 859 | ||
779 | nouveau_pm_fini(dev); | 860 | nouveau_pm_fini(dev); |
780 | nouveau_bios_takedown(dev); | 861 | nouveau_bios_takedown(dev); |
@@ -907,7 +988,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev) | |||
907 | int nouveau_load(struct drm_device *dev, unsigned long flags) | 988 | int nouveau_load(struct drm_device *dev, unsigned long flags) |
908 | { | 989 | { |
909 | struct drm_nouveau_private *dev_priv; | 990 | struct drm_nouveau_private *dev_priv; |
910 | uint32_t reg0; | 991 | uint32_t reg0, strap; |
911 | resource_size_t mmio_start_offs; | 992 | resource_size_t mmio_start_offs; |
912 | int ret; | 993 | int ret; |
913 | 994 | ||
@@ -951,13 +1032,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
951 | 1032 | ||
952 | /* Time to determine the card architecture */ | 1033 | /* Time to determine the card architecture */ |
953 | reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); | 1034 | reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); |
954 | dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */ | ||
955 | 1035 | ||
956 | /* We're dealing with >=NV10 */ | 1036 | /* We're dealing with >=NV10 */ |
957 | if ((reg0 & 0x0f000000) > 0) { | 1037 | if ((reg0 & 0x0f000000) > 0) { |
958 | /* Bit 27-20 contain the architecture in hex */ | 1038 | /* Bit 27-20 contain the architecture in hex */ |
959 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; | 1039 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; |
960 | dev_priv->stepping = (reg0 & 0xff); | ||
961 | /* NV04 or NV05 */ | 1040 | /* NV04 or NV05 */ |
962 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { | 1041 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { |
963 | if (reg0 & 0x00f00000) | 1042 | if (reg0 & 0x00f00000) |
@@ -987,6 +1066,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
987 | case 0xc0: | 1066 | case 0xc0: |
988 | dev_priv->card_type = NV_C0; | 1067 | dev_priv->card_type = NV_C0; |
989 | break; | 1068 | break; |
1069 | case 0xd0: | ||
1070 | dev_priv->card_type = NV_D0; | ||
1071 | break; | ||
990 | default: | 1072 | default: |
991 | NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0); | 1073 | NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0); |
992 | ret = -EINVAL; | 1074 | ret = -EINVAL; |
@@ -996,6 +1078,23 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
996 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", | 1078 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", |
997 | dev_priv->card_type, reg0); | 1079 | dev_priv->card_type, reg0); |
998 | 1080 | ||
1081 | /* determine frequency of timing crystal */ | ||
1082 | strap = nv_rd32(dev, 0x101000); | ||
1083 | if ( dev_priv->chipset < 0x17 || | ||
1084 | (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25)) | ||
1085 | strap &= 0x00000040; | ||
1086 | else | ||
1087 | strap &= 0x00400040; | ||
1088 | |||
1089 | switch (strap) { | ||
1090 | case 0x00000000: dev_priv->crystal = 13500; break; | ||
1091 | case 0x00000040: dev_priv->crystal = 14318; break; | ||
1092 | case 0x00400000: dev_priv->crystal = 27000; break; | ||
1093 | case 0x00400040: dev_priv->crystal = 25000; break; | ||
1094 | } | ||
1095 | |||
1096 | NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal); | ||
1097 | |||
999 | /* Determine whether we'll attempt acceleration or not, some | 1098 | /* Determine whether we'll attempt acceleration or not, some |
1000 | * cards are disabled by default here due to them being known | 1099 | * cards are disabled by default here due to them being known |
1001 | * non-functional, or never been tested due to lack of hw. | 1100 | * non-functional, or never been tested due to lack of hw. |
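The strap decode reads 0x101000 once and masks it down to one or two meaningful bits depending on chipset before the switch; on parts with a single strap bit only 13500 and 14318 can result. The same logic as a self-contained sketch:

    static int
    crystal_khz(u32 strap, int chipset)
    {
            if (chipset < 0x17 || (chipset >= 0x20 && chipset <= 0x25))
                    strap &= 0x00000040;            /* bit 6 only */
            else
                    strap &= 0x00400040;            /* bits 22 and 6 */

            switch (strap) {
            case 0x00000000: return 13500;
            case 0x00000040: return 14318;
            case 0x00400000: return 27000;
            default:         return 25000;          /* 0x00400040 */
            }
    }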
@@ -1030,7 +1129,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
1030 | ioremap(pci_resource_start(dev->pdev, ramin_bar), | 1129 | ioremap(pci_resource_start(dev->pdev, ramin_bar), |
1031 | dev_priv->ramin_size); | 1130 | dev_priv->ramin_size); |
1032 | if (!dev_priv->ramin) { | 1131 | if (!dev_priv->ramin) { |
1033 | NV_ERROR(dev, "Failed to PRAMIN BAR"); | 1132 | NV_ERROR(dev, "Failed to map PRAMIN BAR\n"); |
1034 | ret = -ENOMEM; | 1133 | ret = -ENOMEM; |
1035 | goto err_mmio; | 1134 | goto err_mmio; |
1036 | } | 1135 | } |
@@ -1130,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1130 | getparam->value = 1; | 1229 | getparam->value = 1; |
1131 | break; | 1230 | break; |
1132 | case NOUVEAU_GETPARAM_HAS_PAGEFLIP: | 1231 | case NOUVEAU_GETPARAM_HAS_PAGEFLIP: |
1133 | getparam->value = 1; | 1232 | getparam->value = dev_priv->card_type < NV_D0; |
1134 | break; | 1233 | break; |
1135 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | 1234 | case NOUVEAU_GETPARAM_GRAPH_UNITS: |
1136 | /* NV40 and NV50 versions are quite different, but register | 1235 | /* NV40 and NV50 versions are quite different, but register |
@@ -1198,6 +1297,23 @@ nouveau_wait_ne(struct drm_device *dev, uint64_t timeout, | |||
1198 | return false; | 1297 | return false; |
1199 | } | 1298 | } |
1200 | 1299 | ||
1300 | /* Wait until cond(data) == true, up until timeout has hit */ | ||
1301 | bool | ||
1302 | nouveau_wait_cb(struct drm_device *dev, u64 timeout, | ||
1303 | bool (*cond)(void *), void *data) | ||
1304 | { | ||
1305 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1306 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
1307 | u64 start = ptimer->read(dev); | ||
1308 | |||
1309 | do { | ||
1310 | if (cond(data) == true) | ||
1311 | return true; | ||
1312 | } while (ptimer->read(dev) - start < timeout); | ||
1313 | |||
1314 | return false; | ||
1315 | } | ||
1316 | |||
1201 | /* Waits for PGRAPH to go completely idle */ | 1317 | /* Waits for PGRAPH to go completely idle */ |
1202 | bool nouveau_wait_for_idle(struct drm_device *dev) | 1318 | bool nouveau_wait_for_idle(struct drm_device *dev) |
1203 | { | 1319 | { |
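nouveau_wait_cb() generalises the existing nouveau_wait_eq/ne helpers to an arbitrary predicate, polling PTIMER until the timeout (in nanoseconds, given the 31.25MHz retuning in nv04_timer.c below) expires. A usage sketch; the predicate and register are illustrative, not taken from the driver:

    static bool
    pgraph_quiet(void *data)
    {
            struct drm_device *dev = data;
            return nv_rd32(dev, 0x400700) == 0;     /* illustrative register */
    }

    static int
    wait_pgraph_quiet(struct drm_device *dev)
    {
            /* poll for up to 2 seconds */
            if (!nouveau_wait_cb(dev, 2000000000ULL, pgraph_quiet, dev))
                    return -ETIMEDOUT;
            return 0;
    }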
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c index 244fd38fdb84..ef0832b29ad2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c | |||
@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde) | |||
172 | vm->map_pgt(vpgd->obj, pde, vpgt->obj); | 172 | vm->map_pgt(vpgd->obj, pde, vpgt->obj); |
173 | } | 173 | } |
174 | 174 | ||
175 | mutex_unlock(&vm->mm->mutex); | 175 | mutex_unlock(&vm->mm.mutex); |
176 | nouveau_gpuobj_ref(NULL, &pgt); | 176 | nouveau_gpuobj_ref(NULL, &pgt); |
177 | mutex_lock(&vm->mm->mutex); | 177 | mutex_lock(&vm->mm.mutex); |
178 | } | 178 | } |
179 | } | 179 | } |
180 | 180 | ||
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) | |||
191 | pgt_size = (1 << (vm->pgt_bits + 12)) >> type; | 191 | pgt_size = (1 << (vm->pgt_bits + 12)) >> type; |
192 | pgt_size *= 8; | 192 | pgt_size *= 8; |
193 | 193 | ||
194 | mutex_unlock(&vm->mm->mutex); | 194 | mutex_unlock(&vm->mm.mutex); |
195 | ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, | 195 | ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, |
196 | NVOBJ_FLAG_ZERO_ALLOC, &pgt); | 196 | NVOBJ_FLAG_ZERO_ALLOC, &pgt); |
197 | mutex_lock(&vm->mm->mutex); | 197 | mutex_lock(&vm->mm.mutex); |
198 | if (unlikely(ret)) | 198 | if (unlikely(ret)) |
199 | return ret; | 199 | return ret; |
200 | 200 | ||
201 | /* someone beat us to filling the PDE while we didn't have the lock */ | 201 | /* someone beat us to filling the PDE while we didn't have the lock */ |
202 | if (unlikely(vpgt->refcount[big]++)) { | 202 | if (unlikely(vpgt->refcount[big]++)) { |
203 | mutex_unlock(&vm->mm->mutex); | 203 | mutex_unlock(&vm->mm.mutex); |
204 | nouveau_gpuobj_ref(NULL, &pgt); | 204 | nouveau_gpuobj_ref(NULL, &pgt); |
205 | mutex_lock(&vm->mm->mutex); | 205 | mutex_lock(&vm->mm.mutex); |
206 | return 0; | 206 | return 0; |
207 | } | 207 | } |
208 | 208 | ||
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, | |||
223 | u32 fpde, lpde, pde; | 223 | u32 fpde, lpde, pde; |
224 | int ret; | 224 | int ret; |
225 | 225 | ||
226 | mutex_lock(&vm->mm->mutex); | 226 | mutex_lock(&vm->mm.mutex); |
227 | ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node); | 227 | ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node); |
228 | if (unlikely(ret != 0)) { | 228 | if (unlikely(ret != 0)) { |
229 | mutex_unlock(&vm->mm->mutex); | 229 | mutex_unlock(&vm->mm.mutex); |
230 | return ret; | 230 | return ret; |
231 | } | 231 | } |
232 | 232 | ||
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, | |||
245 | if (ret) { | 245 | if (ret) { |
246 | if (pde != fpde) | 246 | if (pde != fpde) |
247 | nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); | 247 | nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); |
248 | nouveau_mm_put(vm->mm, vma->node); | 248 | nouveau_mm_put(&vm->mm, vma->node); |
249 | mutex_unlock(&vm->mm->mutex); | 249 | mutex_unlock(&vm->mm.mutex); |
250 | vma->node = NULL; | 250 | vma->node = NULL; |
251 | return ret; | 251 | return ret; |
252 | } | 252 | } |
253 | } | 253 | } |
254 | mutex_unlock(&vm->mm->mutex); | 254 | mutex_unlock(&vm->mm.mutex); |
255 | 255 | ||
256 | vma->vm = vm; | 256 | vma->vm = vm; |
257 | vma->offset = (u64)vma->node->offset << 12; | 257 | vma->offset = (u64)vma->node->offset << 12; |
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma) | |||
270 | fpde = (vma->node->offset >> vm->pgt_bits); | 270 | fpde = (vma->node->offset >> vm->pgt_bits); |
271 | lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; | 271 | lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; |
272 | 272 | ||
273 | mutex_lock(&vm->mm->mutex); | 273 | mutex_lock(&vm->mm.mutex); |
274 | nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde); | 274 | nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde); |
275 | nouveau_mm_put(vm->mm, vma->node); | 275 | nouveau_mm_put(&vm->mm, vma->node); |
276 | vma->node = NULL; | 276 | vma->node = NULL; |
277 | mutex_unlock(&vm->mm->mutex); | 277 | mutex_unlock(&vm->mm.mutex); |
278 | } | 278 | } |
279 | 279 | ||
280 | int | 280 | int |
@@ -306,7 +306,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, | |||
306 | block = length; | 306 | block = length; |
307 | 307 | ||
308 | } else | 308 | } else |
309 | if (dev_priv->card_type == NV_C0) { | 309 | if (dev_priv->card_type >= NV_C0) { |
310 | vm->map_pgt = nvc0_vm_map_pgt; | 310 | vm->map_pgt = nvc0_vm_map_pgt; |
311 | vm->map = nvc0_vm_map; | 311 | vm->map = nvc0_vm_map; |
312 | vm->map_sg = nvc0_vm_map_sg; | 312 | vm->map_sg = nvc0_vm_map_sg; |
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) | |||
360 | 360 | ||
361 | nouveau_gpuobj_ref(pgd, &vpgd->obj); | 361 | nouveau_gpuobj_ref(pgd, &vpgd->obj); |
362 | 362 | ||
363 | mutex_lock(&vm->mm->mutex); | 363 | mutex_lock(&vm->mm.mutex); |
364 | for (i = vm->fpde; i <= vm->lpde; i++) | 364 | for (i = vm->fpde; i <= vm->lpde; i++) |
365 | vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); | 365 | vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); |
366 | list_add(&vpgd->head, &vm->pgd_list); | 366 | list_add(&vpgd->head, &vm->pgd_list); |
367 | mutex_unlock(&vm->mm->mutex); | 367 | mutex_unlock(&vm->mm.mutex); |
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | 370 | ||
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) | |||
377 | if (!mpgd) | 377 | if (!mpgd) |
378 | return; | 378 | return; |
379 | 379 | ||
380 | mutex_lock(&vm->mm->mutex); | 380 | mutex_lock(&vm->mm.mutex); |
381 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | 381 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { |
382 | if (vpgd->obj == mpgd) { | 382 | if (vpgd->obj == mpgd) { |
383 | pgd = vpgd->obj; | 383 | pgd = vpgd->obj; |
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) | |||
386 | break; | 386 | break; |
387 | } | 387 | } |
388 | } | 388 | } |
389 | mutex_unlock(&vm->mm->mutex); | 389 | mutex_unlock(&vm->mm.mutex); |
390 | 390 | ||
391 | nouveau_gpuobj_ref(NULL, &pgd); | 391 | nouveau_gpuobj_ref(NULL, &pgd); |
392 | } | 392 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h index 579ca8cc223c..6ce995f7797e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.h +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h | |||
@@ -51,7 +51,7 @@ struct nouveau_vma { | |||
51 | 51 | ||
52 | struct nouveau_vm { | 52 | struct nouveau_vm { |
53 | struct drm_device *dev; | 53 | struct drm_device *dev; |
54 | struct nouveau_mm *mm; | 54 | struct nouveau_mm mm; |
55 | int refcount; | 55 | int refcount; |
56 | 56 | ||
57 | struct list_head pgd_list; | 57 | struct list_head pgd_list; |
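Both files reflect one structural change: struct nouveau_mm is now embedded in struct nouveau_vm instead of allocated separately, so every vm->mm->mutex becomes vm->mm.mutex and callers pass &vm->mm where a pointer is still wanted. One allocation and one indirection fewer per VM; schematically (type names invented for the comparison):

    struct nouveau_vm_old { struct nouveau_mm *mm; };  /* separate kmalloc */
    struct nouveau_vm_new { struct nouveau_mm  mm; };  /* freed with the vm */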
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c index 75e872741d92..86d03e15735d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_volt.c +++ b/drivers/gpu/drm/nouveau/nouveau_volt.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_pm.h" | 28 | #include "nouveau_pm.h" |
29 | 29 | ||
30 | static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a }; | 30 | static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 }; |
31 | static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); | 31 | static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); |
32 | 32 | ||
33 | int | 33 | int |
@@ -170,6 +170,13 @@ nouveau_volt_init(struct drm_device *dev) | |||
170 | */ | 170 | */ |
171 | vidshift = 2; | 171 | vidshift = 2; |
172 | break; | 172 | break; |
173 | case 0x40: | ||
174 | headerlen = volt[1]; | ||
175 | recordlen = volt[2]; | ||
176 | entries = volt[3]; /* not a clue what the entries are for.. */ | ||
177 | vidmask = volt[11]; /* guess.. */ | ||
178 | vidshift = 0; | ||
179 | break; | ||
173 | default: | 180 | default: |
174 | NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); | 181 | NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); |
175 | return; | 182 | return; |
@@ -197,16 +204,37 @@ nouveau_volt_init(struct drm_device *dev) | |||
197 | } | 204 | } |
198 | 205 | ||
199 | /* parse vbios entries into common format */ | 206 | /* parse vbios entries into common format */ |
200 | voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); | 207 | voltage->version = volt[0]; |
201 | if (!voltage->level) | 208 | if (voltage->version < 0x40) { |
202 | return; | 209 | voltage->nr_level = entries; |
210 | voltage->level = | ||
211 | kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); | ||
212 | if (!voltage->level) | ||
213 | return; | ||
203 | 214 | ||
204 | entry = volt + headerlen; | 215 | entry = volt + headerlen; |
205 | for (i = 0; i < entries; i++, entry += recordlen) { | 216 | for (i = 0; i < entries; i++, entry += recordlen) { |
206 | voltage->level[i].voltage = entry[0]; | 217 | voltage->level[i].voltage = entry[0] * 10000; |
207 | voltage->level[i].vid = entry[1] >> vidshift; | 218 | voltage->level[i].vid = entry[1] >> vidshift; |
219 | } | ||
220 | } else { | ||
221 | u32 volt_uv = ROM32(volt[4]); | ||
222 | s16 step_uv = ROM16(volt[8]); | ||
223 | u8 vid; | ||
224 | |||
225 | voltage->nr_level = voltage->vid_mask + 1; | ||
226 | voltage->level = kcalloc(voltage->nr_level, | ||
227 | sizeof(*voltage->level), GFP_KERNEL); | ||
228 | if (!voltage->level) | ||
229 | return; | ||
230 | |||
231 | for (vid = 0; vid <= voltage->vid_mask; vid++) { | ||
232 | voltage->level[vid].voltage = volt_uv; | ||
233 | voltage->level[vid].vid = vid; | ||
234 | volt_uv += step_uv; | ||
235 | } | ||
208 | } | 236 | } |
209 | voltage->nr_level = entries; | 237 | |
210 | voltage->supported = true; | 238 | voltage->supported = true; |
211 | } | 239 | } |
212 | 240 | ||
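Type-0x40 voltage tables carry no per-entry voltages, so the parser now synthesises one level per VID as a linear ramp, voltage(vid) = volt_uv + vid * step_uv; note the pre-0x40 path also changes, scaling entry[0] by 10000 so every level is expressed in microvolts. A standalone sketch with invented values (the driver reads base, step and mask via ROM32(volt[4]), ROM16(volt[8]) and volt[11]):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t volt_uv  = 825000;     /* invented base, in microvolts */
            int16_t  step_uv  = 12500;      /* invented signed step */
            uint8_t  vid_mask = 0x1f;
            unsigned vid;

            for (vid = 0; vid <= vid_mask; vid++, volt_uv += step_uv)
                    printf("vid %2u -> %u uV\n", vid, (unsigned)volt_uv);
            return 0;
    }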
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index 1715e1464b7d..6bd8518d7b2e 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c | |||
@@ -126,27 +126,6 @@ nv04_display_create(struct drm_device *dev) | |||
126 | 126 | ||
127 | nouveau_hw_save_vga_fonts(dev, 1); | 127 | nouveau_hw_save_vga_fonts(dev, 1); |
128 | 128 | ||
129 | drm_mode_config_init(dev); | ||
130 | drm_mode_create_scaling_mode_property(dev); | ||
131 | drm_mode_create_dithering_property(dev); | ||
132 | |||
133 | dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; | ||
134 | |||
135 | dev->mode_config.min_width = 0; | ||
136 | dev->mode_config.min_height = 0; | ||
137 | switch (dev_priv->card_type) { | ||
138 | case NV_04: | ||
139 | dev->mode_config.max_width = 2048; | ||
140 | dev->mode_config.max_height = 2048; | ||
141 | break; | ||
142 | default: | ||
143 | dev->mode_config.max_width = 4096; | ||
144 | dev->mode_config.max_height = 4096; | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | dev->mode_config.fb_base = dev_priv->fb_phys; | ||
149 | |||
150 | nv04_crtc_create(dev, 0); | 129 | nv04_crtc_create(dev, 0); |
151 | if (nv_two_heads(dev)) | 130 | if (nv_two_heads(dev)) |
152 | nv04_crtc_create(dev, 1); | 131 | nv04_crtc_create(dev, 1); |
@@ -235,8 +214,6 @@ nv04_display_destroy(struct drm_device *dev) | |||
235 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 214 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
236 | crtc->funcs->restore(crtc); | 215 | crtc->funcs->restore(crtc); |
237 | 216 | ||
238 | drm_mode_config_cleanup(dev); | ||
239 | |||
240 | nouveau_hw_save_vga_fonts(dev, 0); | 217 | nouveau_hw_save_vga_fonts(dev, 0); |
241 | } | 218 | } |
242 | 219 | ||
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index eb1c70dd82ed..9ae92a87b8cc 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c | |||
@@ -68,6 +68,7 @@ void | |||
68 | nv04_pm_clock_set(struct drm_device *dev, void *pre_state) | 68 | nv04_pm_clock_set(struct drm_device *dev, void *pre_state) |
69 | { | 69 | { |
70 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 70 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
71 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
71 | struct nv04_pm_state *state = pre_state; | 72 | struct nv04_pm_state *state = pre_state; |
72 | u32 reg = state->pll.reg; | 73 | u32 reg = state->pll.reg; |
73 | 74 | ||
@@ -85,6 +86,9 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state) | |||
85 | nv_mask(dev, 0x1002c0, 0, 1 << 8); | 86 | nv_mask(dev, 0x1002c0, 0, 1 << 8); |
86 | } | 87 | } |
87 | 88 | ||
89 | if (reg == NV_PRAMDAC_NVPLL_COEFF) | ||
90 | ptimer->init(dev); | ||
91 | |||
88 | kfree(state); | 92 | kfree(state); |
89 | } | 93 | } |
90 | 94 | ||
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c index 1d09ddd57399..263301b809dd 100644 --- a/drivers/gpu/drm/nouveau/nv04_timer.c +++ b/drivers/gpu/drm/nouveau/nv04_timer.c | |||
@@ -6,43 +6,75 @@ | |||
6 | int | 6 | int |
7 | nv04_timer_init(struct drm_device *dev) | 7 | nv04_timer_init(struct drm_device *dev) |
8 | { | 8 | { |
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
10 | u32 m, n, d; | ||
11 | |||
9 | nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000); | 12 | nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000); |
10 | nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF); | 13 | nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF); |
11 | 14 | ||
12 | /* Just use the pre-existing values when possible for now; these regs | 15 | /* aim for 31.25MHz, which gives us nanosecond timestamps */ |
13 | * are not written in nv (driver writer missed a /4 on the address), and | 16 | d = 1000000 / 32; |
14 | * writing 8 and 3 to the correct regs breaks the timings on the LVDS | 17 | |
15 | * hardware sequencing microcode. | 18 | /* determine base clock for timer source */ |
16 | * A correct solution (involving calculations with the GPU PLL) can | 19 | if (dev_priv->chipset < 0x40) { |
17 | * be done when kernel modesetting lands | 20 | n = dev_priv->engine.pm.clock_get(dev, PLL_CORE); |
18 | */ | 21 | } else |
19 | if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) || | 22 | if (dev_priv->chipset == 0x40) { |
20 | !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) { | 23 | /*XXX: figure this out */ |
21 | nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008); | 24 | n = 0; |
22 | nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003); | 25 | } else { |
26 | n = dev_priv->crystal; | ||
27 | m = 1; | ||
28 | while (n < (d * 2)) { | ||
29 | n += (n / m); | ||
30 | m++; | ||
31 | } | ||
32 | |||
33 | nv_wr32(dev, 0x009220, m - 1); | ||
34 | } | ||
35 | |||
36 | if (!n) { | ||
37 | NV_WARN(dev, "PTIMER: unknown input clock freq\n"); | ||
38 | if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) || | ||
39 | !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) { | ||
40 | nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1); | ||
41 | nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1); | ||
42 | } | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | /* reduce ratio to acceptable values */ | ||
47 | while (((n % 5) == 0) && ((d % 5) == 0)) { | ||
48 | n /= 5; | ||
49 | d /= 5; | ||
23 | } | 50 | } |
24 | 51 | ||
52 | while (((n % 2) == 0) && ((d % 2) == 0)) { | ||
53 | n /= 2; | ||
54 | d /= 2; | ||
55 | } | ||
56 | |||
57 | while (n > 0xffff || d > 0xffff) { | ||
58 | n >>= 1; | ||
59 | d >>= 1; | ||
60 | } | ||
61 | |||
62 | nv_wr32(dev, NV04_PTIMER_NUMERATOR, n); | ||
63 | nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d); | ||
25 | return 0; | 64 | return 0; |
26 | } | 65 | } |
27 | 66 | ||
28 | uint64_t | 67 | u64 |
29 | nv04_timer_read(struct drm_device *dev) | 68 | nv04_timer_read(struct drm_device *dev) |
30 | { | 69 | { |
31 | uint32_t low; | 70 | u32 hi, lo; |
32 | /* From kmmio dumps on nv28 this looks like how the blob does this. | 71 | |
33 | * It reads the high dword twice, before and after. | ||
34 | * The only explanation seems to be that the 64-bit timer counter | ||
35 | * advances between high and low dword reads and may corrupt the | ||
36 | * result. Not confirmed. | ||
37 | */ | ||
38 | uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1); | ||
39 | uint32_t high1; | ||
40 | do { | 72 | do { |
41 | high1 = high2; | 73 | hi = nv_rd32(dev, NV04_PTIMER_TIME_1); |
42 | low = nv_rd32(dev, NV04_PTIMER_TIME_0); | 74 | lo = nv_rd32(dev, NV04_PTIMER_TIME_0); |
43 | high2 = nv_rd32(dev, NV04_PTIMER_TIME_1); | 75 | } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1)); |
44 | } while (high1 != high2); | 76 | |
45 | return (((uint64_t)high2) << 32) | (uint64_t)low; | 77 | return ((u64)hi << 32 | lo); |
46 | } | 78 | } |
47 | 79 | ||
48 | void | 80 | void |
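Two pieces of the new timer code deserve a note: the ratio reduction that fits the 31.25MHz target (nanosecond timestamps) into PTIMER's 16-bit numerator/denominator registers, and the torn-read-free 64-bit counter read, which retries until the high word is stable across the low-word read. Both extracted into a plain sketch:

    static void
    reduce_ratio(u32 *n, u32 *d)
    {
            while (*n % 5 == 0 && *d % 5 == 0) { *n /= 5; *d /= 5; }
            while (*n % 2 == 0 && *d % 2 == 0) { *n /= 2; *d /= 2; }
            while (*n > 0xffff || *d > 0xffff) { *n >>= 1; *d >>= 1; }
    }

    static u64
    read_counter(struct drm_device *dev)
    {
            u32 hi, lo;

            do {
                    hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
                    lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
            } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));

            return ((u64)hi << 32) | lo;
    }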
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c index ad03a0e1fc7d..6f06a0713f00 100644 --- a/drivers/gpu/drm/nouveau/nv40_mpeg.c +++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c | |||
@@ -26,10 +26,32 @@ | |||
26 | #include "nouveau_drv.h" | 26 | #include "nouveau_drv.h" |
27 | #include "nouveau_ramht.h" | 27 | #include "nouveau_ramht.h" |
28 | 28 | ||
29 | struct nv40_mpeg_engine { | 29 | struct nv31_mpeg_engine { |
30 | struct nouveau_exec_engine base; | 30 | struct nouveau_exec_engine base; |
31 | atomic_t refcount; | ||
31 | }; | 32 | }; |
32 | 33 | ||
34 | |||
35 | static int | ||
36 | nv31_mpeg_context_new(struct nouveau_channel *chan, int engine) | ||
37 | { | ||
38 | struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine); | ||
39 | |||
40 | if (!atomic_add_unless(&pmpeg->refcount, 1, 1)) | ||
41 | return -EBUSY; | ||
42 | |||
43 | chan->engctx[engine] = (void *)0xdeadcafe; | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static void | ||
48 | nv31_mpeg_context_del(struct nouveau_channel *chan, int engine) | ||
49 | { | ||
50 | struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine); | ||
51 | atomic_dec(&pmpeg->refcount); | ||
52 | chan->engctx[engine] = NULL; | ||
53 | } | ||
54 | |||
33 | static int | 55 | static int |
34 | nv40_mpeg_context_new(struct nouveau_channel *chan, int engine) | 56 | nv40_mpeg_context_new(struct nouveau_channel *chan, int engine) |
35 | { | 57 | { |
@@ -81,7 +103,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine) | |||
81 | } | 103 | } |
82 | 104 | ||
83 | static int | 105 | static int |
84 | nv40_mpeg_object_new(struct nouveau_channel *chan, int engine, | 106 | nv31_mpeg_object_new(struct nouveau_channel *chan, int engine, |
85 | u32 handle, u16 class) | 107 | u32 handle, u16 class) |
86 | { | 108 | { |
87 | struct drm_device *dev = chan->dev; | 109 | struct drm_device *dev = chan->dev; |
@@ -103,10 +125,10 @@ nv40_mpeg_object_new(struct nouveau_channel *chan, int engine, | |||
103 | } | 125 | } |
104 | 126 | ||
105 | static int | 127 | static int |
106 | nv40_mpeg_init(struct drm_device *dev, int engine) | 128 | nv31_mpeg_init(struct drm_device *dev, int engine) |
107 | { | 129 | { |
108 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 130 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
109 | struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); | 131 | struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine); |
110 | int i; | 132 | int i; |
111 | 133 | ||
112 | /* VPE init */ | 134 | /* VPE init */ |
@@ -121,7 +143,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine) | |||
121 | /* PMPEG init */ | 143 | /* PMPEG init */ |
122 | nv_wr32(dev, 0x00b32c, 0x00000000); | 144 | nv_wr32(dev, 0x00b32c, 0x00000000); |
123 | nv_wr32(dev, 0x00b314, 0x00000100); | 145 | nv_wr32(dev, 0x00b314, 0x00000100); |
124 | nv_wr32(dev, 0x00b220, 0x00000044); | 146 | nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031); |
125 | nv_wr32(dev, 0x00b300, 0x02001ec1); | 147 | nv_wr32(dev, 0x00b300, 0x02001ec1); |
126 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); | 148 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); |
127 | 149 | ||
@@ -137,7 +159,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine) | |||
137 | } | 159 | } |
138 | 160 | ||
139 | static int | 161 | static int |
140 | nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend) | 162 | nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend) |
141 | { | 163 | { |
142 | /*XXX: context save? */ | 164 | /*XXX: context save? */ |
143 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); | 165 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); |
@@ -146,7 +168,7 @@ nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend) | |||
146 | } | 168 | } |
147 | 169 | ||
148 | static int | 170 | static int |
149 | nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) | 171 | nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) |
150 | { | 172 | { |
151 | struct drm_device *dev = chan->dev; | 173 | struct drm_device *dev = chan->dev; |
152 | u32 inst = data << 4; | 174 | u32 inst = data << 4; |
@@ -184,13 +206,17 @@ nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) | |||
184 | } | 206 | } |
185 | 207 | ||
186 | static int | 208 | static int |
187 | nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst) | 209 | nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst) |
188 | { | 210 | { |
189 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 211 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
190 | struct nouveau_gpuobj *ctx; | 212 | struct nouveau_gpuobj *ctx; |
191 | unsigned long flags; | 213 | unsigned long flags; |
192 | int i; | 214 | int i; |
193 | 215 | ||
216 | /* hardcode drm channel id on nv3x, so swmthd lookup works */ | ||
217 | if (dev_priv->card_type < NV_40) | ||
218 | return 0; | ||
219 | |||
194 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 220 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
195 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 221 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
196 | if (!dev_priv->channels.ptr[i]) | 222 | if (!dev_priv->channels.ptr[i]) |
@@ -205,7 +231,7 @@ nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst) | |||
205 | } | 231 | } |
206 | 232 | ||
207 | static void | 233 | static void |
208 | nv40_vpe_set_tile_region(struct drm_device *dev, int i) | 234 | nv31_vpe_set_tile_region(struct drm_device *dev, int i) |
209 | { | 235 | { |
210 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 236 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 237 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
@@ -216,10 +242,10 @@ nv40_vpe_set_tile_region(struct drm_device *dev, int i) | |||
216 | } | 242 | } |
217 | 243 | ||
218 | static void | 244 | static void |
219 | nv40_mpeg_isr(struct drm_device *dev) | 245 | nv31_mpeg_isr(struct drm_device *dev) |
220 | { | 246 | { |
221 | u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4; | 247 | u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4; |
222 | u32 chid = nv40_mpeg_isr_chid(dev, inst); | 248 | u32 chid = nv31_mpeg_isr_chid(dev, inst); |
223 | u32 stat = nv_rd32(dev, 0x00b100); | 249 | u32 stat = nv_rd32(dev, 0x00b100); |
224 | u32 type = nv_rd32(dev, 0x00b230); | 250 | u32 type = nv_rd32(dev, 0x00b230); |
225 | u32 mthd = nv_rd32(dev, 0x00b234); | 251 | u32 mthd = nv_rd32(dev, 0x00b234); |
@@ -249,10 +275,10 @@ nv40_mpeg_isr(struct drm_device *dev) | |||
249 | } | 275 | } |
250 | 276 | ||
251 | static void | 277 | static void |
252 | nv40_vpe_isr(struct drm_device *dev) | 278 | nv31_vpe_isr(struct drm_device *dev) |
253 | { | 279 | { |
254 | if (nv_rd32(dev, 0x00b100)) | 280 | if (nv_rd32(dev, 0x00b100)) |
255 | nv40_mpeg_isr(dev); | 281 | nv31_mpeg_isr(dev); |
256 | 282 | ||
257 | if (nv_rd32(dev, 0x00b800)) { | 283 | if (nv_rd32(dev, 0x00b800)) { |
258 | u32 stat = nv_rd32(dev, 0x00b800); | 284 | u32 stat = nv_rd32(dev, 0x00b800); |
@@ -262,9 +288,9 @@ nv40_vpe_isr(struct drm_device *dev) | |||
262 | } | 288 | } |
263 | 289 | ||
264 | static void | 290 | static void |
265 | nv40_mpeg_destroy(struct drm_device *dev, int engine) | 291 | nv31_mpeg_destroy(struct drm_device *dev, int engine) |
266 | { | 292 | { |
267 | struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); | 293 | struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine); |
268 | 294 | ||
269 | nouveau_irq_unregister(dev, 0); | 295 | nouveau_irq_unregister(dev, 0); |
270 | 296 | ||
@@ -273,34 +299,41 @@ nv40_mpeg_destroy(struct drm_device *dev, int engine) | |||
273 | } | 299 | } |
274 | 300 | ||
275 | int | 301 | int |
276 | nv40_mpeg_create(struct drm_device *dev) | 302 | nv31_mpeg_create(struct drm_device *dev) |
277 | { | 303 | { |
278 | struct nv40_mpeg_engine *pmpeg; | 304 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
305 | struct nv31_mpeg_engine *pmpeg; | ||
279 | 306 | ||
280 | pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL); | 307 | pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL); |
281 | if (!pmpeg) | 308 | if (!pmpeg) |
282 | return -ENOMEM; | 309 | return -ENOMEM; |
283 | 310 | atomic_set(&pmpeg->refcount, 0); | |
284 | pmpeg->base.destroy = nv40_mpeg_destroy; | 311 | |
285 | pmpeg->base.init = nv40_mpeg_init; | 312 | pmpeg->base.destroy = nv31_mpeg_destroy; |
286 | pmpeg->base.fini = nv40_mpeg_fini; | 313 | pmpeg->base.init = nv31_mpeg_init; |
287 | pmpeg->base.context_new = nv40_mpeg_context_new; | 314 | pmpeg->base.fini = nv31_mpeg_fini; |
288 | pmpeg->base.context_del = nv40_mpeg_context_del; | 315 | if (dev_priv->card_type < NV_40) { |
289 | pmpeg->base.object_new = nv40_mpeg_object_new; | 316 | pmpeg->base.context_new = nv31_mpeg_context_new; |
317 | pmpeg->base.context_del = nv31_mpeg_context_del; | ||
318 | } else { | ||
319 | pmpeg->base.context_new = nv40_mpeg_context_new; | ||
320 | pmpeg->base.context_del = nv40_mpeg_context_del; | ||
321 | } | ||
322 | pmpeg->base.object_new = nv31_mpeg_object_new; | ||
290 | 323 | ||
291 | /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between | 324 | /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between |
292 | * all VPE engines, for this driver's purposes the PMPEG engine | 325 | * all VPE engines, for this driver's purposes the PMPEG engine |
293 | * will be treated as the "master" and handle the global VPE | 326 | * will be treated as the "master" and handle the global VPE |
294 | * bits too | 327 | * bits too |
295 | */ | 328 | */ |
296 | pmpeg->base.set_tile_region = nv40_vpe_set_tile_region; | 329 | pmpeg->base.set_tile_region = nv31_vpe_set_tile_region; |
297 | nouveau_irq_register(dev, 0, nv40_vpe_isr); | 330 | nouveau_irq_register(dev, 0, nv31_vpe_isr); |
298 | 331 | ||
299 | NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); | 332 | NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); |
300 | NVOBJ_CLASS(dev, 0x3174, MPEG); | 333 | NVOBJ_CLASS(dev, 0x3174, MPEG); |
301 | NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma); | 334 | NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma); |
302 | NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma); | 335 | NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma); |
303 | NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma); | 336 | NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma); |
304 | 337 | ||
305 | #if 0 | 338 | #if 0 |
306 | NVOBJ_ENGINE_ADD(dev, ME, &pme->base); | 339 | NVOBJ_ENGINE_ADD(dev, ME, &pme->base); |
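On nv3x PMPEG keeps no per-channel context, so the new nv31 hooks simply gate the engine behind a refcount that may only move 0 -> 1; a second channel gets -EBUSY, and the 0xdeadcafe cookie exists only to make engctx non-NULL. The gate in isolation (function name hypothetical):

    static int
    engine_acquire(atomic_t *refcount)
    {
            /* atomic_add_unless() fails if the count is already 1 */
            if (!atomic_add_unless(refcount, 1, 1))
                    return -EBUSY;
            return 0;
    }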
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c new file mode 100644 index 000000000000..bbc0b9c7e1f7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
@@ -0,0 +1,338 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_bios.h" | ||
28 | #include "nouveau_pm.h" | ||
29 | #include "nouveau_hw.h" | ||
30 | |||
31 | #define min2(a,b) ((a) < (b) ? (a) : (b)) | ||
32 | |||
33 | static u32 | ||
34 | read_pll_1(struct drm_device *dev, u32 reg) | ||
35 | { | ||
36 | u32 ctrl = nv_rd32(dev, reg + 0x00); | ||
37 | int P = (ctrl & 0x00070000) >> 16; | ||
38 | int N = (ctrl & 0x0000ff00) >> 8; | ||
39 | int M = (ctrl & 0x000000ff) >> 0; | ||
40 | u32 ref = 27000, clk = 0; | ||
41 | |||
42 | if (ctrl & 0x80000000) | ||
43 | clk = ref * N / M; | ||
44 | |||
45 | return clk >> P; | ||
46 | } | ||
47 | |||
48 | static u32 | ||
49 | read_pll_2(struct drm_device *dev, u32 reg) | ||
50 | { | ||
51 | u32 ctrl = nv_rd32(dev, reg + 0x00); | ||
52 | u32 coef = nv_rd32(dev, reg + 0x04); | ||
53 | int N2 = (coef & 0xff000000) >> 24; | ||
54 | int M2 = (coef & 0x00ff0000) >> 16; | ||
55 | int N1 = (coef & 0x0000ff00) >> 8; | ||
56 | int M1 = (coef & 0x000000ff) >> 0; | ||
57 | int P = (ctrl & 0x00070000) >> 16; | ||
58 | u32 ref = 27000, clk = 0; | ||
59 | |||
60 | if (ctrl & 0x80000000) | ||
61 | clk = ref * N1 / M1; | ||
62 | |||
63 | if (!(ctrl & 0x00000100)) { | ||
64 | if (ctrl & 0x40000000) | ||
65 | clk = clk * N2 / M2; | ||
66 | } | ||
67 | |||
68 | return clk >> P; | ||
69 | } | ||
70 | |||
71 | static u32 | ||
72 | read_clk(struct drm_device *dev, u32 src) | ||
73 | { | ||
74 | switch (src) { | ||
75 | case 3: | ||
76 | return read_pll_2(dev, 0x004000); | ||
77 | case 2: | ||
78 | return read_pll_1(dev, 0x004008); | ||
79 | default: | ||
80 | break; | ||
81 | } | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | int | ||
87 | nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | ||
88 | { | ||
89 | u32 ctrl = nv_rd32(dev, 0x00c040); | ||
90 | |||
91 | perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0); | ||
92 | perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4); | ||
93 | perflvl->memory = read_pll_2(dev, 0x4020); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | struct nv40_pm_state { | ||
98 | u32 ctrl; | ||
99 | u32 npll_ctrl; | ||
100 | u32 npll_coef; | ||
101 | u32 spll; | ||
102 | u32 mpll_ctrl; | ||
103 | u32 mpll_coef; | ||
104 | }; | ||
105 | |||
106 | static int | ||
107 | nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll, | ||
108 | u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P) | ||
109 | { | ||
110 | struct nouveau_pll_vals coef; | ||
111 | int ret; | ||
112 | |||
113 | ret = get_pll_limits(dev, reg, pll); | ||
114 | if (ret) | ||
115 | return ret; | ||
116 | |||
117 | if (clk < pll->vco1.maxfreq) | ||
118 | pll->vco2.maxfreq = 0; | ||
119 | |||
120 | ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef); | ||
121 | if (ret == 0) | ||
122 | return -ERANGE; | ||
123 | |||
124 | *N1 = coef.N1; | ||
125 | *M1 = coef.M1; | ||
126 | if (N2 && M2) { | ||
127 | if (pll->vco2.maxfreq) { | ||
128 | *N2 = coef.N2; | ||
129 | *M2 = coef.M2; | ||
130 | } else { | ||
131 | *N2 = 1; | ||
132 | *M2 = 1; | ||
133 | } | ||
134 | } | ||
135 | *log2P = coef.log2P; | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | void * | ||
140 | nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | ||
141 | { | ||
142 | struct nv40_pm_state *info; | ||
143 | struct pll_lims pll; | ||
144 | int N1, N2, M1, M2, log2P; | ||
145 | int ret; | ||
146 | |||
147 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
148 | if (!info) | ||
149 | return ERR_PTR(-ENOMEM); | ||
150 | |||
151 | /* core/geometric clock */ | ||
152 | ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core, | ||
153 | &N1, &M1, &N2, &M2, &log2P); | ||
154 | if (ret < 0) | ||
155 | goto out; | ||
156 | |||
157 | if (N2 == M2) { | ||
158 | info->npll_ctrl = 0x80000100 | (log2P << 16); | ||
159 | info->npll_coef = (N1 << 8) | M1; | ||
160 | } else { | ||
161 | info->npll_ctrl = 0xc0000000 | (log2P << 16); | ||
162 | info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; | ||
163 | } | ||
164 | |||
165 | /* use the second PLL for shader/rop clock, if it differs from core */ | ||
166 | if (perflvl->shader && perflvl->shader != perflvl->core) { | ||
167 | ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader, | ||
168 | &N1, &M1, NULL, NULL, &log2P); | ||
169 | if (ret < 0) | ||
170 | goto out; | ||
171 | |||
172 | info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1; | ||
173 | info->ctrl = 0x00000223; | ||
174 | } else { | ||
175 | info->spll = 0x00000000; | ||
176 | info->ctrl = 0x00000333; | ||
177 | } | ||
178 | |||
179 | /* memory clock */ | ||
180 | ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, | ||
181 | &N1, &M1, &N2, &M2, &log2P); | ||
182 | if (ret < 0) | ||
183 | goto out; | ||
184 | |||
185 | info->mpll_ctrl = 0x80000000 | (log2P << 16); | ||
186 | info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20; | ||
187 | if (N2 == M2) { | ||
188 | info->mpll_ctrl |= 0x00000100; | ||
189 | info->mpll_coef = (N1 << 8) | M1; | ||
190 | } else { | ||
191 | info->mpll_ctrl |= 0x40000000; | ||
192 | info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; | ||
193 | } | ||
194 | |||
195 | out: | ||
196 | if (ret < 0) { | ||
197 | kfree(info); | ||
198 | info = ERR_PTR(ret); | ||
199 | } | ||
200 | return info; | ||
201 | } | ||
202 | |||
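The pre/set split above hands state between the two calls as a pointer that may instead carry an errno via ERR_PTR(). A minimal sketch of the consuming side, using the standard kernel accessors (the wrapper function itself is hypothetical):

#include <linux/err.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_pm.h"

/* Hypothetical caller: decode the ERR_PTR()-encoded return before use. */
static int nv40_reclock_example(struct drm_device *dev,
				struct nouveau_pm_level *perflvl)
{
	void *state = nv40_pm_clocks_pre(dev, perflvl);

	if (IS_ERR(state))
		return PTR_ERR(state);	/* -ENOMEM or a PLL calc error */

	/* clocks_set() programs the PLLs and kfree()s the state */
	nv40_pm_clocks_set(dev, state);
	return 0;
}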
203 | static bool | ||
204 | nv40_pm_gr_idle(void *data) | ||
205 | { | ||
206 | struct drm_device *dev = data; | ||
207 | |||
208 | if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 != | ||
209 | (nv_rd32(dev, 0x400760) & 0x0000000f)) | ||
210 | return false; | ||
211 | |||
212 | if (nv_rd32(dev, 0x400700)) | ||
213 | return false; | ||
214 | |||
215 | return true; | ||
216 | } | ||
217 | |||
218 | void | ||
219 | nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) | ||
220 | { | ||
221 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
222 | struct nv40_pm_state *info = pre_state; | ||
223 | unsigned long flags; | ||
224 | struct bit_entry M; | ||
225 | u32 crtc_mask = 0; | ||
226 | u8 sr1[2]; | ||
227 | int i; | ||
228 | |||
229 | /* determine which CRTCs are active, fetch VGA_SR1 for each */ | ||
230 | for (i = 0; i < 2; i++) { | ||
231 | u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000)); | ||
232 | u32 cnt = 0; | ||
233 | do { | ||
234 | if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) { | ||
235 | nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01); | ||
236 | sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000)); | ||
237 | if (!(sr1[i] & 0x20)) | ||
238 | crtc_mask |= (1 << i); | ||
239 | break; | ||
240 | } | ||
241 | udelay(1); | ||
242 | } while (cnt++ < 32); | ||
243 | } | ||
244 | |||
245 | /* halt and idle engines */ | ||
246 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
247 | nv_mask(dev, 0x002500, 0x00000001, 0x00000000); | ||
248 | if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000)) | ||
249 | goto resume; | ||
250 | nv_mask(dev, 0x003220, 0x00000001, 0x00000000); | ||
251 | if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000)) | ||
252 | goto resume; | ||
253 | nv_mask(dev, 0x003200, 0x00000001, 0x00000000); | ||
254 | nv04_fifo_cache_pull(dev, false); | ||
255 | |||
256 | if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev)) | ||
257 | goto resume; | ||
258 | |||
259 | /* set engine clocks */ | ||
260 | nv_mask(dev, 0x00c040, 0x00000333, 0x00000000); | ||
261 | nv_wr32(dev, 0x004004, info->npll_coef); | ||
262 | nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl); | ||
263 | nv_mask(dev, 0x004008, 0xc007ffff, info->spll); | ||
264 | mdelay(5); | ||
265 | nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); | ||
266 | |||
267 | /* wait for vblank start on active crtcs, disable memory access */ | ||
268 | for (i = 0; i < 2; i++) { | ||
269 | if (!(crtc_mask & (1 << i))) | ||
270 | continue; | ||
271 | nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000); | ||
272 | nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); | ||
273 | nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01); | ||
274 | nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20); | ||
275 | } | ||
276 | |||
277 | /* prepare ram for reclocking */ | ||
278 | nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */ | ||
279 | nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */ | ||
280 | nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */ | ||
281 | nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */ | ||
282 | nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */ | ||
283 | |||
284 | /* change the PLL of each memory partition */ | ||
285 | nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000); | ||
286 | switch (dev_priv->chipset) { | ||
287 | case 0x40: | ||
288 | case 0x45: | ||
289 | case 0x41: | ||
290 | case 0x42: | ||
291 | case 0x47: | ||
292 | nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl); | ||
293 | nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl); | ||
294 | nv_wr32(dev, 0x004048, info->mpll_coef); | ||
295 | nv_wr32(dev, 0x004030, info->mpll_coef); | ||
296 | case 0x43: | ||
297 | case 0x49: | ||
298 | case 0x4b: | ||
299 | nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl); | ||
300 | nv_wr32(dev, 0x00403c, info->mpll_coef); | ||
301 | default: | ||
302 | nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl); | ||
303 | nv_wr32(dev, 0x004024, info->mpll_coef); | ||
304 | break; | ||
305 | } | ||
306 | udelay(100); | ||
307 | nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000); | ||
308 | |||
309 | /* re-enable normal operation of memory controller */ | ||
310 | nv_wr32(dev, 0x1002dc, 0x00000000); | ||
311 | nv_mask(dev, 0x100210, 0x80000000, 0x80000000); | ||
312 | udelay(100); | ||
313 | |||
314 | /* execute memory reset script from vbios */ | ||
315 | if (!bit_table(dev, 'M', &M)) | ||
316 | nouveau_bios_init_exec(dev, ROM16(M.data[0])); | ||
317 | |||
318 | /* make sure we're in vblank (hopefully the same one as before), and | ||
319 | * then re-enable crtc memory access | ||
320 | */ | ||
321 | for (i = 0; i < 2; i++) { | ||
322 | if (!(crtc_mask & (1 << i))) | ||
323 | continue; | ||
324 | nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); | ||
325 | nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01); | ||
326 | nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]); | ||
327 | } | ||
328 | |||
329 | /* resume engines */ | ||
330 | resume: | ||
331 | nv_wr32(dev, 0x003250, 0x00000001); | ||
332 | nv_mask(dev, 0x003220, 0x00000001, 0x00000001); | ||
333 | nv_wr32(dev, 0x003200, 0x00000001); | ||
334 | nv_wr32(dev, 0x002500, 0x00000001); | ||
335 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
336 | |||
337 | kfree(info); | ||
338 | } | ||
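nv40_pm_clocks_set() above leans repeatedly on the nv_wait() poll-until-match idiom to line the reclock up with vblank. A self-contained sketch of that idiom, with a simulated register read standing in for MMIO (the toggling behaviour and names are hypothetical):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simulated MMIO read so the sketch runs standalone: bit 16 of the
 * "vblank" register toggles on every read. */
static uint32_t fake_vbl;

static uint32_t reg_read(uint32_t reg)
{
	(void)reg;
	return fake_vbl += 0x00010000;
}

/* nv_wait()-style poll: spin until (read & mask) == val, bounded. */
static bool poll_mask(uint32_t reg, uint32_t mask, uint32_t val,
		      unsigned int tries)
{
	while (tries--) {
		if ((reg_read(reg) & mask) == val)
			return true;
	}
	return false;
}

int main(void)
{
	uint32_t reg = 0x600808;	/* head 0; head 1 adds 0x2000 */

	/* wait to leave vblank, then for the next one to start, as the
	 * memory-access disable above does */
	bool ok = poll_mask(reg, 0x00010000, 0x00000000, 1000) &&
		  poll_mask(reg, 0x00010000, 0x00010000, 1000);
	printf("vblank sync: %s\n", ok ? "ok" : "timeout");
	return 0;
}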
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 5d989073ba6e..882080e0b4f5 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -329,8 +329,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
329 | 329 | ||
330 | drm_crtc_cleanup(&nv_crtc->base); | 330 | drm_crtc_cleanup(&nv_crtc->base); |
331 | 331 | ||
332 | nv50_cursor_fini(nv_crtc); | ||
333 | |||
334 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | 332 | nouveau_bo_unmap(nv_crtc->lut.nvbo); |
335 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | 333 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); |
336 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 334 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 9752c35bb84b..adfc9b607a50 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -137,21 +137,3 @@ nv50_cursor_init(struct nouveau_crtc *nv_crtc) | |||
137 | nv_crtc->cursor.show = nv50_cursor_show; | 137 | nv_crtc->cursor.show = nv50_cursor_show; |
138 | return 0; | 138 | return 0; |
139 | } | 139 | } |
140 | |||
141 | void | ||
142 | nv50_cursor_fini(struct nouveau_crtc *nv_crtc) | ||
143 | { | ||
144 | struct drm_device *dev = nv_crtc->base.dev; | ||
145 | int idx = nv_crtc->index; | ||
146 | |||
147 | NV_DEBUG_KMS(dev, "\n"); | ||
148 | |||
149 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); | ||
150 | if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), | ||
151 | NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { | ||
152 | NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); | ||
153 | NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", | ||
154 | nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx))); | ||
155 | } | ||
156 | } | ||
157 | |||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index db1a5f4b711d..d23ca00e7d62 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -247,6 +247,16 @@ static int nv50_display_disable(struct drm_device *dev) | |||
247 | } | 247 | } |
248 | } | 248 | } |
249 | 249 | ||
250 | for (i = 0; i < 2; i++) { | ||
251 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0); | ||
252 | if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), | ||
253 | NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { | ||
254 | NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); | ||
255 | NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", | ||
256 | nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); | ||
257 | } | ||
258 | } | ||
259 | |||
250 | nv50_evo_fini(dev); | 260 | nv50_evo_fini(dev); |
251 | 261 | ||
252 | for (i = 0; i < 3; i++) { | 262 | for (i = 0; i < 3; i++) { |
@@ -286,23 +296,6 @@ int nv50_display_create(struct drm_device *dev) | |||
286 | return -ENOMEM; | 296 | return -ENOMEM; |
287 | dev_priv->engine.display.priv = priv; | 297 | dev_priv->engine.display.priv = priv; |
288 | 298 | ||
289 | /* init basic kernel modesetting */ | ||
290 | drm_mode_config_init(dev); | ||
291 | |||
292 | /* Initialise some optional connector properties. */ | ||
293 | drm_mode_create_scaling_mode_property(dev); | ||
294 | drm_mode_create_dithering_property(dev); | ||
295 | |||
296 | dev->mode_config.min_width = 0; | ||
297 | dev->mode_config.min_height = 0; | ||
298 | |||
299 | dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; | ||
300 | |||
301 | dev->mode_config.max_width = 8192; | ||
302 | dev->mode_config.max_height = 8192; | ||
303 | |||
304 | dev->mode_config.fb_base = dev_priv->fb_phys; | ||
305 | |||
306 | /* Create CRTC objects */ | 299 | /* Create CRTC objects */ |
307 | for (i = 0; i < 2; i++) | 300 | for (i = 0; i < 2; i++) |
308 | nv50_crtc_create(dev, i); | 301 | nv50_crtc_create(dev, i); |
@@ -364,8 +357,6 @@ nv50_display_destroy(struct drm_device *dev) | |||
364 | 357 | ||
365 | NV_DEBUG_KMS(dev, "\n"); | 358 | NV_DEBUG_KMS(dev, "\n"); |
366 | 359 | ||
367 | drm_mode_config_cleanup(dev); | ||
368 | |||
369 | nv50_display_disable(dev); | 360 | nv50_display_disable(dev); |
370 | nouveau_irq_unregister(dev, 26); | 361 | nouveau_irq_unregister(dev, 26); |
371 | kfree(disp); | 362 | kfree(disp); |
@@ -698,7 +689,7 @@ nv50_display_unk10_handler(struct drm_device *dev) | |||
698 | struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; | 689 | struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; |
699 | 690 | ||
700 | if (dcb->type == type && (dcb->or & (1 << or))) { | 691 | if (dcb->type == type && (dcb->or & (1 << or))) { |
701 | nouveau_bios_run_display_table(dev, dcb, 0, -1); | 692 | nouveau_bios_run_display_table(dev, 0, -1, dcb, -1); |
702 | disp->irq.dcb = dcb; | 693 | disp->irq.dcb = dcb; |
703 | goto ack; | 694 | goto ack; |
704 | } | 695 | } |
@@ -711,37 +702,6 @@ ack: | |||
711 | } | 702 | } |
712 | 703 | ||
713 | static void | 704 | static void |
714 | nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) | ||
715 | { | ||
716 | int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); | ||
717 | struct drm_encoder *encoder; | ||
718 | uint32_t tmp, unk0 = 0, unk1 = 0; | ||
719 | |||
720 | if (dcb->type != OUTPUT_DP) | ||
721 | return; | ||
722 | |||
723 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
724 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
725 | |||
726 | if (nv_encoder->dcb == dcb) { | ||
727 | unk0 = nv_encoder->dp.unk0; | ||
728 | unk1 = nv_encoder->dp.unk1; | ||
729 | break; | ||
730 | } | ||
731 | } | ||
732 | |||
733 | if (unk0 || unk1) { | ||
734 | tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); | ||
735 | tmp &= 0xfffffe03; | ||
736 | nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0); | ||
737 | |||
738 | tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); | ||
739 | tmp &= 0xfef080c0; | ||
740 | nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1); | ||
741 | } | ||
742 | } | ||
743 | |||
744 | static void | ||
745 | nv50_display_unk20_handler(struct drm_device *dev) | 705 | nv50_display_unk20_handler(struct drm_device *dev) |
746 | { | 706 | { |
747 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 707 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -753,7 +713,7 @@ nv50_display_unk20_handler(struct drm_device *dev) | |||
753 | NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); | 713 | NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); |
754 | dcb = disp->irq.dcb; | 714 | dcb = disp->irq.dcb; |
755 | if (dcb) { | 715 | if (dcb) { |
756 | nouveau_bios_run_display_table(dev, dcb, 0, -2); | 716 | nouveau_bios_run_display_table(dev, 0, -2, dcb, -1); |
757 | disp->irq.dcb = NULL; | 717 | disp->irq.dcb = NULL; |
758 | } | 718 | } |
759 | 719 | ||
@@ -837,9 +797,15 @@ nv50_display_unk20_handler(struct drm_device *dev) | |||
837 | } | 797 | } |
838 | 798 | ||
839 | script = nv50_display_script_select(dev, dcb, mc, pclk); | 799 | script = nv50_display_script_select(dev, dcb, mc, pclk); |
840 | nouveau_bios_run_display_table(dev, dcb, script, pclk); | 800 | nouveau_bios_run_display_table(dev, script, pclk, dcb, -1); |
841 | 801 | ||
842 | nv50_display_unk20_dp_hack(dev, dcb); | 802 | if (type == OUTPUT_DP) { |
803 | int link = !(dcb->dpconf.sor.link & 1); | ||
804 | if ((mc & 0x000f0000) == 0x00020000) | ||
805 | nouveau_dp_tu_update(dev, or, link, pclk, 18); | ||
806 | else | ||
807 | nouveau_dp_tu_update(dev, or, link, pclk, 24); | ||
808 | } | ||
843 | 809 | ||
844 | if (dcb->type != OUTPUT_ANALOG) { | 810 | if (dcb->type != OUTPUT_ANALOG) { |
845 | tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); | 811 | tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); |
@@ -904,7 +870,7 @@ nv50_display_unk40_handler(struct drm_device *dev) | |||
904 | if (!dcb) | 870 | if (!dcb) |
905 | goto ack; | 871 | goto ack; |
906 | 872 | ||
907 | nouveau_bios_run_display_table(dev, dcb, script, -pclk); | 873 | nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1); |
908 | nv50_display_unk40_dp_set_tmds(dev, dcb); | 874 | nv50_display_unk40_dp_set_tmds(dev, dcb); |
909 | 875 | ||
910 | ack: | 876 | ack: |
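All nouveau_bios_run_display_table() call sites above move to the reordered (dev, script, pclk, dcb, crtc) argument list, with crtc = -1 meaning no particular head. A hypothetical compatibility shim makes the mapping explicit (sketch only; the real prototype lives in the driver headers, not in this hunk):

#include "drmP.h"
#include "nouveau_drv.h"

/* Hypothetical shim: forward an old-style (dev, dcb, script, pclk)
 * call to the new argument order, with no specific CRTC. */
static void run_display_table_compat(struct drm_device *dev,
				     struct dcb_entry *dcb,
				     int script, int pclk)
{
	nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
}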
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index d4f4206dad7e..793a5ccca121 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -98,6 +98,37 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | int | 100 | int |
101 | nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) | ||
102 | { | ||
103 | struct dcb_gpio_entry *gpio; | ||
104 | u32 v; | ||
105 | |||
106 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
107 | if (!gpio) | ||
108 | return -ENOENT; | ||
109 | |||
110 | v = nv_rd32(dev, 0x00d610 + (gpio->line * 4)); | ||
111 | v &= 0x00004000; | ||
112 | return (!!v == (gpio->state[1] & 1)); | ||
113 | } | ||
114 | |||
115 | int | ||
116 | nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) | ||
117 | { | ||
118 | struct dcb_gpio_entry *gpio; | ||
119 | u32 v; | ||
120 | |||
121 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
122 | if (!gpio) | ||
123 | return -ENOENT; | ||
124 | |||
125 | v = gpio->state[state] ^ 2; | ||
126 | |||
127 | nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | int | ||
101 | nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, | 132 | nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, |
102 | void (*handler)(void *, int), void *data) | 133 | void (*handler)(void *, int), void *data) |
103 | { | 134 | { |
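The new nvd0 hooks above drive one register per GPIO line at 0x00d610 + line * 4: bit 14 is the sampled input and bits 12-13 the output drive select. A standalone sketch of just the bit arithmetic (field meanings inferred from the code; the register snapshot is hypothetical):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Input side: compare bit 14 against the DCB polarity, as
 * nvd0_gpio_get() does. */
static bool gpio_line_state(uint32_t reg_val, uint32_t pol)
{
	bool v = !!(reg_val & 0x00004000);
	return v == (pol & 1);
}

/* Output side: state ^ 2 shifted into bits 12-13, masked the same way
 * nvd0_gpio_set() masks 0x00003000. */
static uint32_t gpio_drive_bits(uint32_t dcb_state)
{
	return ((dcb_state ^ 2) << 12) & 0x00003000;
}

int main(void)
{
	printf("line state: %d\n", gpio_line_state(0x00004000, 1));
	printf("drive bits: 0x%08x\n", gpio_drive_bits(1));
	return 0;
}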
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index d43c46caa76e..8c979b31ff61 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -120,70 +120,62 @@ nv50_graph_unload_context(struct drm_device *dev) | |||
120 | return 0; | 120 | return 0; |
121 | } | 121 | } |
122 | 122 | ||
123 | static void | 123 | static int |
124 | nv50_graph_init_reset(struct drm_device *dev) | 124 | nv50_graph_init(struct drm_device *dev, int engine) |
125 | { | ||
126 | uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); | ||
127 | NV_DEBUG(dev, "\n"); | ||
128 | |||
129 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); | ||
130 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e); | ||
131 | } | ||
132 | |||
133 | static void | ||
134 | nv50_graph_init_intr(struct drm_device *dev) | ||
135 | { | ||
136 | NV_DEBUG(dev, "\n"); | ||
137 | |||
138 | nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); | ||
139 | nv_wr32(dev, 0x400138, 0xffffffff); | ||
140 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); | ||
141 | } | ||
142 | |||
143 | static void | ||
144 | nv50_graph_init_regs__nv(struct drm_device *dev) | ||
145 | { | 125 | { |
146 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 126 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
147 | uint32_t units = nv_rd32(dev, 0x1540); | 127 | struct nv50_graph_engine *pgraph = nv_engine(dev, engine); |
128 | u32 units = nv_rd32(dev, 0x001540); | ||
148 | int i; | 129 | int i; |
149 | 130 | ||
150 | NV_DEBUG(dev, "\n"); | 131 | NV_DEBUG(dev, "\n"); |
151 | 132 | ||
133 | /* master reset */ | ||
134 | nv_mask(dev, 0x000200, 0x00200100, 0x00000000); | ||
135 | nv_mask(dev, 0x000200, 0x00200100, 0x00200100); | ||
136 | nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ | ||
137 | |||
138 | /* reset/enable traps and interrupts */ | ||
152 | nv_wr32(dev, 0x400804, 0xc0000000); | 139 | nv_wr32(dev, 0x400804, 0xc0000000); |
153 | nv_wr32(dev, 0x406800, 0xc0000000); | 140 | nv_wr32(dev, 0x406800, 0xc0000000); |
154 | nv_wr32(dev, 0x400c04, 0xc0000000); | 141 | nv_wr32(dev, 0x400c04, 0xc0000000); |
155 | nv_wr32(dev, 0x401800, 0xc0000000); | 142 | nv_wr32(dev, 0x401800, 0xc0000000); |
156 | nv_wr32(dev, 0x405018, 0xc0000000); | 143 | nv_wr32(dev, 0x405018, 0xc0000000); |
157 | nv_wr32(dev, 0x402000, 0xc0000000); | 144 | nv_wr32(dev, 0x402000, 0xc0000000); |
158 | |||
159 | for (i = 0; i < 16; i++) { | 145 | for (i = 0; i < 16; i++) { |
160 | if (units & 1 << i) { | 146 | if (!(units & (1 << i))) |
161 | if (dev_priv->chipset < 0xa0) { | 147 | continue; |
162 | nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000); | 148 | |
163 | nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); | 149 | if (dev_priv->chipset < 0xa0) { |
164 | nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); | 150 | nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000); |
165 | } else { | 151 | nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); |
166 | nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); | 152 | nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); |
167 | nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); | 153 | } else { |
168 | nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); | 154 | nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); |
169 | } | 155 | nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); |
156 | nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); | ||
170 | } | 157 | } |
171 | } | 158 | } |
172 | 159 | ||
173 | nv_wr32(dev, 0x400108, 0xffffffff); | 160 | nv_wr32(dev, 0x400108, 0xffffffff); |
174 | 161 | nv_wr32(dev, 0x400138, 0xffffffff); | |
175 | nv_wr32(dev, 0x400824, 0x00004000); | 162 | nv_wr32(dev, 0x400100, 0xffffffff); |
163 | nv_wr32(dev, 0x40013c, 0xffffffff); | ||
176 | nv_wr32(dev, 0x400500, 0x00010001); | 164 | nv_wr32(dev, 0x400500, 0x00010001); |
177 | } | ||
178 | |||
179 | static void | ||
180 | nv50_graph_init_zcull(struct drm_device *dev) | ||
181 | { | ||
182 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
183 | int i; | ||
184 | |||
185 | NV_DEBUG(dev, "\n"); | ||
186 | 165 | ||
166 | /* upload context program, initialise ctxctl defaults */ | ||
167 | nv_wr32(dev, 0x400324, 0x00000000); | ||
168 | for (i = 0; i < pgraph->ctxprog_size; i++) | ||
169 | nv_wr32(dev, 0x400328, pgraph->ctxprog[i]); | ||
170 | nv_wr32(dev, 0x400824, 0x00000000); | ||
171 | nv_wr32(dev, 0x400828, 0x00000000); | ||
172 | nv_wr32(dev, 0x40082c, 0x00000000); | ||
173 | nv_wr32(dev, 0x400830, 0x00000000); | ||
174 | nv_wr32(dev, 0x400724, 0x00000000); | ||
175 | nv_wr32(dev, 0x40032c, 0x00000000); | ||
176 | nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */ | ||
177 | |||
178 | /* some unknown zcull magic */ | ||
187 | switch (dev_priv->chipset & 0xf0) { | 179 | switch (dev_priv->chipset & 0xf0) { |
188 | case 0x50: | 180 | case 0x50: |
189 | case 0x80: | 181 | case 0x80: |
@@ -212,43 +204,7 @@ nv50_graph_init_zcull(struct drm_device *dev) | |||
212 | nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000); | 204 | nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000); |
213 | nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000); | 205 | nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000); |
214 | } | 206 | } |
215 | } | ||
216 | |||
217 | static int | ||
218 | nv50_graph_init_ctxctl(struct drm_device *dev) | ||
219 | { | ||
220 | struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR); | ||
221 | int i; | ||
222 | |||
223 | NV_DEBUG(dev, "\n"); | ||
224 | |||
225 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); | ||
226 | for (i = 0; i < pgraph->ctxprog_size; i++) | ||
227 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]); | ||
228 | |||
229 | nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ | ||
230 | nv_wr32(dev, 0x400320, 4); | ||
231 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); | ||
232 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static int | ||
237 | nv50_graph_init(struct drm_device *dev, int engine) | ||
238 | { | ||
239 | int ret; | ||
240 | |||
241 | NV_DEBUG(dev, "\n"); | ||
242 | |||
243 | nv50_graph_init_reset(dev); | ||
244 | nv50_graph_init_regs__nv(dev); | ||
245 | nv50_graph_init_zcull(dev); | ||
246 | |||
247 | ret = nv50_graph_init_ctxctl(dev); | ||
248 | if (ret) | ||
249 | return ret; | ||
250 | 207 | ||
251 | nv50_graph_init_intr(dev); | ||
252 | return 0; | 208 | return 0; |
253 | } | 209 | } |
254 | 210 | ||
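The consolidated nv50_graph_init() above uploads the context program through an index/data register pair: one write to 0x400324 resets the ucode index, then each write to 0x400328 stores a word and advances the index. A standalone sketch of that pattern (reg_write() just logs here; the driver uses nv_wr32(), and the sample ucode words are hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CTXCTL_UCODE_INDEX 0x400324
#define CTXCTL_UCODE_DATA  0x400328

static void reg_write(uint32_t reg, uint32_t val)
{
	printf("wr 0x%06x <- 0x%08x\n", reg, val);
}

static void upload_ctxprog(const uint32_t *prog, size_t words)
{
	size_t i;

	reg_write(CTXCTL_UCODE_INDEX, 0);	/* start at word 0 */
	for (i = 0; i < words; i++)
		reg_write(CTXCTL_UCODE_DATA, prog[i]);
}

int main(void)
{
	const uint32_t prog[] = { 0x00700080, 0x00600004, 0x00000000 };

	upload_ctxprog(prog, sizeof(prog) / sizeof(prog[0]));
	return 0;
}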
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index de9abff12b90..d05c2c3b2444 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -40,6 +40,12 @@ | |||
40 | #define CP_FLAG_UNK0B ((0 * 32) + 0xb) | 40 | #define CP_FLAG_UNK0B ((0 * 32) + 0xb) |
41 | #define CP_FLAG_UNK0B_CLEAR 0 | 41 | #define CP_FLAG_UNK0B_CLEAR 0 |
42 | #define CP_FLAG_UNK0B_SET 1 | 42 | #define CP_FLAG_UNK0B_SET 1 |
43 | #define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe) | ||
44 | #define CP_FLAG_XFER_SWITCH_DISABLE 0 | ||
45 | #define CP_FLAG_XFER_SWITCH_ENABLE 1 | ||
46 | #define CP_FLAG_STATE ((0 * 32) + 0x1c) | ||
47 | #define CP_FLAG_STATE_STOPPED 0 | ||
48 | #define CP_FLAG_STATE_RUNNING 1 | ||
43 | #define CP_FLAG_UNK1D ((0 * 32) + 0x1d) | 49 | #define CP_FLAG_UNK1D ((0 * 32) + 0x1d) |
44 | #define CP_FLAG_UNK1D_CLEAR 0 | 50 | #define CP_FLAG_UNK1D_CLEAR 0 |
45 | #define CP_FLAG_UNK1D_SET 1 | 51 | #define CP_FLAG_UNK1D_SET 1 |
@@ -194,6 +200,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx) | |||
194 | "the devs.\n"); | 200 | "the devs.\n"); |
195 | return -ENOSYS; | 201 | return -ENOSYS; |
196 | } | 202 | } |
203 | |||
204 | cp_set (ctx, STATE, RUNNING); | ||
205 | cp_set (ctx, XFER_SWITCH, ENABLE); | ||
197 | /* decide whether we're loading/unloading the context */ | 206 | /* decide whether we're loading/unloading the context */ |
198 | cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); | 207 | cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); |
199 | cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); | 208 | cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); |
@@ -260,6 +269,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx) | |||
260 | cp_name(ctx, cp_exit); | 269 | cp_name(ctx, cp_exit); |
261 | cp_set (ctx, USER_SAVE, NOT_PENDING); | 270 | cp_set (ctx, USER_SAVE, NOT_PENDING); |
262 | cp_set (ctx, USER_LOAD, NOT_PENDING); | 271 | cp_set (ctx, USER_LOAD, NOT_PENDING); |
272 | cp_set (ctx, XFER_SWITCH, DISABLE); | ||
273 | cp_set (ctx, STATE, STOPPED); | ||
263 | cp_out (ctx, CP_END); | 274 | cp_out (ctx, CP_END); |
264 | ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ | 275 | ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ |
265 | 276 | ||
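The CP_FLAG_XFER_SWITCH and CP_FLAG_STATE values added above reuse the file's ((word * 32) + bit) flag encoding, so a flag id decomposes directly into a flag-word index and a bit position. A tiny sketch of that decomposition (the decode helper is hypothetical):

#include <stdio.h>

/* Flag ids encode (word * 32) + bit, as in the defines above. */
#define CP_FLAG_XFER_SWITCH	((0 * 32) + 0xe)
#define CP_FLAG_STATE		((0 * 32) + 0x1c)

static void cp_flag_decode(int flag, int *word, int *bit)
{
	*word = flag / 32;
	*bit  = flag % 32;
}

int main(void)
{
	int word, bit;

	cp_flag_decode(CP_FLAG_STATE, &word, &bit);
	printf("CP_FLAG_STATE -> word %d, bit %d\n", word, bit);
	cp_flag_decode(CP_FLAG_XFER_SWITCH, &word, &bit);
	printf("CP_FLAG_XFER_SWITCH -> word %d, bit %d\n", word, bit);
	return 0;
}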
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 8a2810011bda..3d5a86b98282 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -115,15 +115,15 @@ nv50_pm_clock_set(struct drm_device *dev, void *pre_state) | |||
115 | BIT_M.version == 1 && BIT_M.length >= 0x0b) { | 115 | BIT_M.version == 1 && BIT_M.length >= 0x0b) { |
116 | script = ROM16(BIT_M.data[0x05]); | 116 | script = ROM16(BIT_M.data[0x05]); |
117 | if (script) | 117 | if (script) |
118 | nouveau_bios_run_init_table(dev, script, NULL); | 118 | nouveau_bios_run_init_table(dev, script, NULL, -1); |
119 | script = ROM16(BIT_M.data[0x07]); | 119 | script = ROM16(BIT_M.data[0x07]); |
120 | if (script) | 120 | if (script) |
121 | nouveau_bios_run_init_table(dev, script, NULL); | 121 | nouveau_bios_run_init_table(dev, script, NULL, -1); |
122 | script = ROM16(BIT_M.data[0x09]); | 122 | script = ROM16(BIT_M.data[0x09]); |
123 | if (script) | 123 | if (script) |
124 | nouveau_bios_run_init_table(dev, script, NULL); | 124 | nouveau_bios_run_init_table(dev, script, NULL, -1); |
125 | 125 | ||
126 | nouveau_bios_run_init_table(dev, perflvl->memscript, NULL); | 126 | nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1); |
127 | } | 127 | } |
128 | 128 | ||
129 | if (state->type == PLL_MEMORY) { | 129 | if (state->type == PLL_MEMORY) { |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ffe8b483b7b0..2633aa8554eb 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -124,7 +124,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
124 | if (mode == DRM_MODE_DPMS_ON) { | 124 | if (mode == DRM_MODE_DPMS_ON) { |
125 | u8 status = DP_SET_POWER_D0; | 125 | u8 status = DP_SET_POWER_D0; |
126 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); | 126 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); |
127 | nouveau_dp_link_train(encoder); | 127 | nouveau_dp_link_train(encoder, nv_encoder->dp.datarate); |
128 | } else { | 128 | } else { |
129 | u8 status = DP_SET_POWER_D3; | 129 | u8 status = DP_SET_POWER_D3; |
130 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); | 130 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); |
@@ -187,14 +187,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
187 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 187 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
188 | struct drm_device *dev = encoder->dev; | 188 | struct drm_device *dev = encoder->dev; |
189 | struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); | 189 | struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); |
190 | struct nouveau_connector *nv_connector; | ||
190 | uint32_t mode_ctl = 0; | 191 | uint32_t mode_ctl = 0; |
191 | int ret; | 192 | int ret; |
192 | 193 | ||
193 | NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", | 194 | NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", |
194 | nv_encoder->or, nv_encoder->dcb->type, crtc->index); | 195 | nv_encoder->or, nv_encoder->dcb->type, crtc->index); |
195 | 196 | ||
196 | nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); | ||
197 | |||
198 | switch (nv_encoder->dcb->type) { | 197 | switch (nv_encoder->dcb->type) { |
199 | case OUTPUT_TMDS: | 198 | case OUTPUT_TMDS: |
200 | if (nv_encoder->dcb->sorconf.link & 1) { | 199 | if (nv_encoder->dcb->sorconf.link & 1) { |
@@ -206,7 +205,15 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
206 | mode_ctl = 0x0200; | 205 | mode_ctl = 0x0200; |
207 | break; | 206 | break; |
208 | case OUTPUT_DP: | 207 | case OUTPUT_DP: |
209 | mode_ctl |= (nv_encoder->dp.mc_unknown << 16); | 208 | nv_connector = nouveau_encoder_connector_get(nv_encoder); |
209 | if (nv_connector && nv_connector->base.display_info.bpc == 6) { | ||
210 | nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8; | ||
211 | mode_ctl |= 0x00020000; | ||
212 | } else { | ||
213 | nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8; | ||
214 | mode_ctl |= 0x00050000; | ||
215 | } | ||
216 | |||
210 | if (nv_encoder->dcb->sorconf.link & 1) | 217 | if (nv_encoder->dcb->sorconf.link & 1) |
211 | mode_ctl |= 0x00000800; | 218 | mode_ctl |= 0x00000800; |
212 | else | 219 | else |
@@ -227,6 +234,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
227 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | 234 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
228 | mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC; | 235 | mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC; |
229 | 236 | ||
237 | nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); | ||
238 | |||
230 | ret = RING_SPACE(evo, 2); | 239 | ret = RING_SPACE(evo, 2); |
231 | if (ret) { | 240 | if (ret) { |
232 | NV_ERROR(dev, "no space while connecting SOR\n"); | 241 | NV_ERROR(dev, "no space while connecting SOR\n"); |
@@ -313,31 +322,6 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry) | |||
313 | encoder->possible_crtcs = entry->heads; | 322 | encoder->possible_crtcs = entry->heads; |
314 | encoder->possible_clones = 0; | 323 | encoder->possible_clones = 0; |
315 | 324 | ||
316 | if (nv_encoder->dcb->type == OUTPUT_DP) { | ||
317 | int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); | ||
318 | uint32_t tmp; | ||
319 | |||
320 | tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); | ||
321 | if (!tmp) | ||
322 | tmp = nv_rd32(dev, 0x610798 + (or * 8)); | ||
323 | |||
324 | switch ((tmp & 0x00000f00) >> 8) { | ||
325 | case 8: | ||
326 | case 9: | ||
327 | nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16; | ||
328 | tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); | ||
329 | nv_encoder->dp.unk0 = tmp & 0x000001fc; | ||
330 | tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); | ||
331 | nv_encoder->dp.unk1 = tmp & 0x010f7f3f; | ||
332 | break; | ||
333 | default: | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | if (!nv_encoder->dp.mc_unknown) | ||
338 | nv_encoder->dp.mc_unknown = 5; | ||
339 | } | ||
340 | |||
341 | drm_mode_connector_attach_encoder(connector, encoder); | 325 | drm_mode_connector_attach_encoder(connector, encoder); |
342 | return 0; | 326 | return 0; |
343 | } | 327 | } |
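The OUTPUT_DP branch above sizes the link for the sink's reported depth: a 6 bpc panel gets 18 bpp, everything else 24 bpp, and dp.datarate becomes pixel clock (kHz) * bpp / 8. A standalone version of that arithmetic (the 1080p pixel clock below is just an example):

#include <stdio.h>
#include <stdint.h>

static uint32_t dp_datarate(uint32_t pclk_khz, int bpc)
{
	int bpp = (bpc == 6) ? 18 : 24;

	return pclk_khz * bpp / 8;
}

int main(void)
{
	printf("6 bpc: %u\n", dp_datarate(148500, 6));	/* 334125 */
	printf("8 bpc: %u\n", dp_datarate(148500, 8));	/* 445500 */
	return 0;
}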
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index af32daecd1ed..9da23838e63e 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,7 +51,7 @@ void | |||
51 | nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) | 51 | nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) |
52 | { | 52 | { |
53 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 53 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
54 | struct nouveau_mm *mm = dev_priv->engine.vram.mm; | 54 | struct nouveau_mm *mm = &dev_priv->engine.vram.mm; |
55 | struct nouveau_mm_node *this; | 55 | struct nouveau_mm_node *this; |
56 | struct nouveau_mem *mem; | 56 | struct nouveau_mem *mem; |
57 | 57 | ||
@@ -82,7 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, | |||
82 | u32 memtype, struct nouveau_mem **pmem) | 82 | u32 memtype, struct nouveau_mem **pmem) |
83 | { | 83 | { |
84 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 84 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
85 | struct nouveau_mm *mm = dev_priv->engine.vram.mm; | 85 | struct nouveau_mm *mm = &dev_priv->engine.vram.mm; |
86 | struct nouveau_mm_node *r; | 86 | struct nouveau_mm_node *r; |
87 | struct nouveau_mem *mem; | 87 | struct nouveau_mem *mem; |
88 | int comp = (memtype & 0x300) >> 8; | 88 | int comp = (memtype & 0x300) >> 8; |
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index e4b2b9e934b2..618c144b7a30 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,178 +27,316 @@ | |||
27 | #include "nouveau_bios.h" | 27 | #include "nouveau_bios.h" |
28 | #include "nouveau_pm.h" | 28 | #include "nouveau_pm.h" |
29 | 29 | ||
30 | /* This is actually a lot more complex than it appears here, but hopefully | 30 | static u32 read_clk(struct drm_device *, int, bool); |
31 | * this should be able to deal with what the VBIOS leaves for us.. | 31 | static u32 read_pll(struct drm_device *, int, u32); |
32 | * | ||
33 | * If not, well, I'll jump off that bridge when I come to it. | ||
34 | */ | ||
35 | 32 | ||
36 | struct nva3_pm_state { | 33 | static u32 |
37 | enum pll_types type; | 34 | read_vco(struct drm_device *dev, int clk) |
38 | u32 src0; | 35 | { |
39 | u32 src1; | 36 | u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4)); |
40 | u32 ctrl; | 37 | if ((sctl & 0x00000030) != 0x00000030) |
41 | u32 coef; | 38 | return read_pll(dev, 0x41, 0x00e820); |
42 | u32 old_pnm; | 39 | return read_pll(dev, 0x42, 0x00e8a0); |
43 | u32 new_pnm; | 40 | } |
44 | u32 new_div; | ||
45 | }; | ||
46 | 41 | ||
47 | static int | 42 | static u32 |
48 | nva3_pm_pll_offset(u32 id) | 43 | read_clk(struct drm_device *dev, int clk, bool ignore_en) |
49 | { | 44 | { |
50 | static const u32 pll_map[] = { | 45 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
51 | 0x00, PLL_CORE, | 46 | u32 sctl, sdiv, sclk; |
52 | 0x01, PLL_SHADER, | 47 | |
53 | 0x02, PLL_MEMORY, | 48 | /* refclk for the 0xe8xx plls is a fixed frequency */ |
54 | 0x00, 0x00 | 49 | if (clk >= 0x40) { |
55 | }; | 50 | if (dev_priv->chipset == 0xaf) { |
56 | const u32 *map = pll_map; | 51 | /* no joke.. seriously.. sigh.. */ |
57 | 52 | return nv_rd32(dev, 0x00471c) * 1000; | |
58 | while (map[1]) { | 53 | } |
59 | if (id == map[1]) | 54 | |
60 | return map[0]; | 55 | return dev_priv->crystal; |
61 | map += 2; | ||
62 | } | 56 | } |
63 | 57 | ||
64 | return -ENOENT; | 58 | sctl = nv_rd32(dev, 0x4120 + (clk * 4)); |
59 | if (!ignore_en && !(sctl & 0x00000100)) | ||
60 | return 0; | ||
61 | |||
62 | switch (sctl & 0x00003000) { | ||
63 | case 0x00000000: | ||
64 | return dev_priv->crystal; | ||
65 | case 0x00002000: | ||
66 | if (sctl & 0x00000040) | ||
67 | return 108000; | ||
68 | return 100000; | ||
69 | case 0x00003000: | ||
70 | sclk = read_vco(dev, clk); | ||
71 | sdiv = ((sctl & 0x003f0000) >> 16) + 2; | ||
72 | return (sclk * 2) / sdiv; | ||
73 | default: | ||
74 | return 0; | ||
75 | } | ||
65 | } | 76 | } |
66 | 77 | ||
67 | int | 78 | static u32 |
68 | nva3_pm_clock_get(struct drm_device *dev, u32 id) | 79 | read_pll(struct drm_device *dev, int clk, u32 pll) |
80 | { | ||
81 | u32 ctrl = nv_rd32(dev, pll + 0); | ||
82 | u32 sclk = 0, P = 1, N = 1, M = 1; | ||
83 | |||
84 | if (!(ctrl & 0x00000008)) { | ||
85 | if (ctrl & 0x00000001) { | ||
86 | u32 coef = nv_rd32(dev, pll + 4); | ||
87 | M = (coef & 0x000000ff) >> 0; | ||
88 | N = (coef & 0x0000ff00) >> 8; | ||
89 | P = (coef & 0x003f0000) >> 16; | ||
90 | |||
91 | /* no post-divider on these.. */ | ||
92 | if ((pll & 0x00ff00) == 0x00e800) | ||
93 | P = 1; | ||
94 | |||
95 | sclk = read_clk(dev, 0x00 + clk, false); | ||
96 | } | ||
97 | } else { | ||
98 | sclk = read_clk(dev, 0x10 + clk, false); | ||
99 | } | ||
100 | |||
101 | return sclk * N / (M * P); | ||
102 | } | ||
103 | |||
104 | struct creg { | ||
105 | u32 clk; | ||
106 | u32 pll; | ||
107 | }; | ||
108 | |||
109 | static int | ||
110 | calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg) | ||
69 | { | 111 | { |
70 | u32 src0, src1, ctrl, coef; | 112 | struct pll_lims limits; |
71 | struct pll_lims pll; | 113 | u32 oclk, sclk, sdiv; |
72 | int ret, off; | 114 | int P, N, M, diff; |
73 | int P, N, M; | 115 | int ret; |
116 | |||
117 | reg->pll = 0; | ||
118 | reg->clk = 0; | ||
119 | if (!khz) { | ||
120 | NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk); | ||
121 | return 0; | ||
122 | } | ||
74 | 123 | ||
75 | ret = get_pll_limits(dev, id, &pll); | 124 | switch (khz) { |
125 | case 27000: | ||
126 | reg->clk = 0x00000100; | ||
127 | return khz; | ||
128 | case 100000: | ||
129 | reg->clk = 0x00002100; | ||
130 | return khz; | ||
131 | case 108000: | ||
132 | reg->clk = 0x00002140; | ||
133 | return khz; | ||
134 | default: | ||
135 | sclk = read_vco(dev, clk); | ||
136 | sdiv = min((sclk * 2) / (khz - 2999), (u32)65); | ||
137 | /* if the clock has a PLL attached, and we can get a within | ||
138 | * [-2, 3) MHz of a divider, we'll disable the PLL and use | ||
139 | * the divider instead. | ||
140 | * | ||
141 | * divider can go as low as 2, limited here because NVIDIA | ||
142 | * and the VBIOS on my NVA8 seem to prefer using the PLL | ||
143 | * for 810MHz - is there a good reason? | ||
144 | */ | ||
145 | if (sdiv > 4) { | ||
146 | oclk = (sclk * 2) / sdiv; | ||
147 | diff = khz - oclk; | ||
148 | if (!pll || (diff >= -2000 && diff < 3000)) { | ||
149 | reg->clk = (((sdiv - 2) << 16) | 0x00003100); | ||
150 | return oclk; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | if (!pll) { | ||
155 | NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk); | ||
156 | return -ERANGE; | ||
157 | } | ||
158 | |||
159 | break; | ||
160 | } | ||
161 | |||
162 | ret = get_pll_limits(dev, pll, &limits); | ||
76 | if (ret) | 163 | if (ret) |
77 | return ret; | 164 | return ret; |
78 | 165 | ||
79 | off = nva3_pm_pll_offset(id); | 166 | limits.refclk = read_clk(dev, clk - 0x10, true); |
80 | if (off < 0) | 167 | if (!limits.refclk) |
81 | return off; | 168 | return -EINVAL; |
169 | |||
170 | ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); | ||
171 | if (ret >= 0) { | ||
172 | reg->clk = nv_rd32(dev, 0x4120 + (clk * 4)); | ||
173 | reg->pll = (P << 16) | (N << 8) | M; | ||
174 | } | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | static void | ||
179 | prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg) | ||
180 | { | ||
181 | const u32 src0 = 0x004120 + (clk * 4); | ||
182 | const u32 src1 = 0x004160 + (clk * 4); | ||
183 | const u32 ctrl = pll + 0; | ||
184 | const u32 coef = pll + 4; | ||
185 | u32 cntl; | ||
186 | |||
187 | if (!reg->clk && !reg->pll) { | ||
188 | NV_DEBUG(dev, "no clock for %02x\n", clk); | ||
189 | return; | ||
190 | } | ||
82 | 191 | ||
83 | src0 = nv_rd32(dev, 0x4120 + (off * 4)); | 192 | cntl = nv_rd32(dev, ctrl) & 0xfffffff2; |
84 | src1 = nv_rd32(dev, 0x4160 + (off * 4)); | 193 | if (reg->pll) { |
85 | ctrl = nv_rd32(dev, pll.reg + 0); | 194 | nv_mask(dev, src0, 0x00000101, 0x00000101); |
86 | coef = nv_rd32(dev, pll.reg + 4); | 195 | nv_wr32(dev, coef, reg->pll); |
87 | NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n", | 196 | nv_wr32(dev, ctrl, cntl | 0x00000015); |
88 | id, src0, src1, ctrl, coef); | 197 | nv_mask(dev, src1, 0x00000100, 0x00000000); |
198 | nv_mask(dev, src1, 0x00000001, 0x00000000); | ||
199 | } else { | ||
200 | nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk); | ||
201 | nv_wr32(dev, ctrl, cntl | 0x0000001d); | ||
202 | nv_mask(dev, ctrl, 0x00000001, 0x00000000); | ||
203 | nv_mask(dev, src0, 0x00000100, 0x00000000); | ||
204 | nv_mask(dev, src0, 0x00000001, 0x00000000); | ||
205 | } | ||
206 | } | ||
89 | 207 | ||
90 | if (ctrl & 0x00000008) { | 208 | static void |
91 | u32 div = ((src1 & 0x003c0000) >> 18) + 1; | 209 | prog_clk(struct drm_device *dev, int clk, struct creg *reg) |
92 | return (pll.refclk * 2) / div; | 210 | { |
211 | if (!reg->clk) { | ||
212 | NV_DEBUG(dev, "no clock for %02x\n", clk); | ||
213 | return; | ||
93 | } | 214 | } |
94 | 215 | ||
95 | P = (coef & 0x003f0000) >> 16; | 216 | nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk); |
96 | N = (coef & 0x0000ff00) >> 8; | 217 | } |
97 | M = (coef & 0x000000ff); | 218 | |
98 | return pll.refclk * N / M / P; | 219 | int |
220 | nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | ||
221 | { | ||
222 | perflvl->core = read_pll(dev, 0x00, 0x4200); | ||
223 | perflvl->shader = read_pll(dev, 0x01, 0x4220); | ||
224 | perflvl->memory = read_pll(dev, 0x02, 0x4000); | ||
225 | perflvl->unka0 = read_clk(dev, 0x20, false); | ||
226 | perflvl->vdec = read_clk(dev, 0x21, false); | ||
227 | perflvl->daemon = read_clk(dev, 0x25, false); | ||
228 | perflvl->copy = perflvl->core; | ||
229 | return 0; | ||
99 | } | 230 | } |
100 | 231 | ||
232 | struct nva3_pm_state { | ||
233 | struct creg nclk; | ||
234 | struct creg sclk; | ||
235 | struct creg mclk; | ||
236 | struct creg vdec; | ||
237 | struct creg unka0; | ||
238 | }; | ||
239 | |||
101 | void * | 240 | void * |
102 | nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, | 241 | nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) |
103 | u32 id, int khz) | ||
104 | { | 242 | { |
105 | struct nva3_pm_state *pll; | 243 | struct nva3_pm_state *info; |
106 | struct pll_lims limits; | 244 | int ret; |
107 | int N, M, P, diff; | ||
108 | int ret, off; | ||
109 | 245 | ||
110 | ret = get_pll_limits(dev, id, &limits); | 246 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
247 | if (!info) | ||
248 | return ERR_PTR(-ENOMEM); | ||
249 | |||
250 | ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk); | ||
111 | if (ret < 0) | 251 | if (ret < 0) |
112 | return (ret == -ENOENT) ? NULL : ERR_PTR(ret); | 252 | goto out; |
113 | 253 | ||
114 | off = nva3_pm_pll_offset(id); | 254 | ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk); |
115 | if (id < 0) | 255 | if (ret < 0) |
116 | return ERR_PTR(-EINVAL); | 256 | goto out; |
117 | 257 | ||
258 | ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk); | ||
259 | if (ret < 0) | ||
260 | goto out; | ||
118 | 261 | ||
119 | pll = kzalloc(sizeof(*pll), GFP_KERNEL); | 262 | ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0); |
120 | if (!pll) | 263 | if (ret < 0) |
121 | return ERR_PTR(-ENOMEM); | 264 | goto out; |
122 | pll->type = id; | ||
123 | pll->src0 = 0x004120 + (off * 4); | ||
124 | pll->src1 = 0x004160 + (off * 4); | ||
125 | pll->ctrl = limits.reg + 0; | ||
126 | pll->coef = limits.reg + 4; | ||
127 | |||
128 | /* If target clock is within [-2, 3) MHz of a divisor, we'll | ||
129 | * use that instead of calculating MNP values | ||
130 | */ | ||
131 | pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16); | ||
132 | if (pll->new_div) { | ||
133 | diff = khz - ((limits.refclk * 2) / pll->new_div); | ||
134 | if (diff < -2000 || diff >= 3000) | ||
135 | pll->new_div = 0; | ||
136 | } | ||
137 | 265 | ||
138 | if (!pll->new_div) { | 266 | ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec); |
139 | ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); | 267 | if (ret < 0) |
140 | if (ret < 0) | 268 | goto out; |
141 | return ERR_PTR(ret); | ||
142 | 269 | ||
143 | pll->new_pnm = (P << 16) | (N << 8) | M; | 270 | out: |
144 | pll->new_div = 2 - 1; | 271 | if (ret < 0) { |
145 | } else { | 272 | kfree(info); |
146 | pll->new_pnm = 0; | 273 | info = ERR_PTR(ret); |
147 | pll->new_div--; | ||
148 | } | 274 | } |
275 | return info; | ||
276 | } | ||
277 | |||
278 | static bool | ||
279 | nva3_pm_grcp_idle(void *data) | ||
280 | { | ||
281 | struct drm_device *dev = data; | ||
149 | 282 | ||
150 | if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101) | 283 | if (!(nv_rd32(dev, 0x400304) & 0x00000001)) |
151 | pll->old_pnm = nv_rd32(dev, pll->coef); | 284 | return true; |
152 | return pll; | 285 | if (nv_rd32(dev, 0x400308) == 0x0050001c) |
286 | return true; | ||
287 | return false; | ||
153 | } | 288 | } |
154 | 289 | ||
155 | void | 290 | void |
156 | nva3_pm_clock_set(struct drm_device *dev, void *pre_state) | 291 | nva3_pm_clocks_set(struct drm_device *dev, void *pre_state) |
157 | { | 292 | { |
158 | struct nva3_pm_state *pll = pre_state; | 293 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
159 | u32 ctrl = 0; | 294 | struct nva3_pm_state *info = pre_state; |
295 | unsigned long flags; | ||
160 | 296 | ||
161 | /* For the memory clock, NVIDIA will build a "script" describing | 297 | /* prevent any new grctx switches from starting */ |
162 | * the reclocking process and ask PDAEMON to execute it. | 298 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
163 | */ | 299 | nv_wr32(dev, 0x400324, 0x00000000); |
164 | if (pll->type == PLL_MEMORY) { | 300 | nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */ |
165 | nv_wr32(dev, 0x100210, 0); | 301 | /* wait for any pending grctx switches to complete */ |
166 | nv_wr32(dev, 0x1002dc, 1); | 302 | if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) { |
167 | nv_wr32(dev, 0x004018, 0x00001000); | 303 | NV_ERROR(dev, "pm: ctxprog didn't go idle\n"); |
168 | ctrl = 0x18000100; | 304 | goto cleanup; |
169 | } | 305 | } |
170 | 306 | /* freeze PFIFO */ | |
171 | if (pll->old_pnm || !pll->new_pnm) { | 307 | nv_mask(dev, 0x002504, 0x00000001, 0x00000001); |
172 | nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 | | 308 | if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) { |
173 | (pll->new_div << 18)); | 309 | NV_ERROR(dev, "pm: fifo didn't go idle\n"); |
174 | nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl); | 310 | goto cleanup; |
175 | nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000); | ||
176 | } | 311 | } |
177 | 312 | ||
178 | if (pll->new_pnm) { | 313 | prog_pll(dev, 0x00, 0x004200, &info->nclk); |
179 | nv_mask(dev, pll->src0, 0x00000101, 0x00000101); | 314 | prog_pll(dev, 0x01, 0x004220, &info->sclk); |
180 | nv_wr32(dev, pll->coef, pll->new_pnm); | 315 | prog_clk(dev, 0x20, &info->unka0); |
181 | nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl); | 316 | prog_clk(dev, 0x21, &info->vdec); |
182 | nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000); | ||
183 | nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010); | ||
184 | nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl); | ||
185 | nv_mask(dev, pll->src1, 0x00000100, 0x00000000); | ||
186 | nv_mask(dev, pll->src1, 0x00000001, 0x00000000); | ||
187 | if (pll->type == PLL_MEMORY) | ||
188 | nv_wr32(dev, 0x4018, 0x10005000); | ||
189 | } else { | ||
190 | nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000); | ||
191 | nv_mask(dev, pll->src0, 0x00000100, 0x00000000); | ||
192 | nv_mask(dev, pll->src0, 0x00000001, 0x00000000); | ||
193 | if (pll->type == PLL_MEMORY) | ||
194 | nv_wr32(dev, 0x4018, 0x1000d000); | ||
195 | } | ||
196 | 317 | ||
197 | if (pll->type == PLL_MEMORY) { | 318 | if (info->mclk.clk || info->mclk.pll) { |
319 | nv_wr32(dev, 0x100210, 0); | ||
320 | nv_wr32(dev, 0x1002dc, 1); | ||
321 | nv_wr32(dev, 0x004018, 0x00001000); | ||
322 | prog_pll(dev, 0x02, 0x004000, &info->mclk); | ||
323 | if (nv_rd32(dev, 0x4000) & 0x00000008) | ||
324 | nv_wr32(dev, 0x004018, 0x1000d000); | ||
325 | else | ||
326 | nv_wr32(dev, 0x004018, 0x10005000); | ||
198 | nv_wr32(dev, 0x1002dc, 0); | 327 | nv_wr32(dev, 0x1002dc, 0); |
199 | nv_wr32(dev, 0x100210, 0x80000000); | 328 | nv_wr32(dev, 0x100210, 0x80000000); |
200 | } | 329 | } |
201 | 330 | ||
202 | kfree(pll); | 331 | cleanup: |
332 | /* unfreeze PFIFO */ | ||
333 | nv_mask(dev, 0x002504, 0x00000001, 0x00000000); | ||
334 | /* restore ctxprog to normal */ | ||
335 | nv_wr32(dev, 0x400324, 0x00000000); | ||
336 | nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */ | ||
337 | /* unblock it if necessary */ | ||
338 | if (nv_rd32(dev, 0x400308) == 0x0050001c) | ||
339 | nv_mask(dev, 0x400824, 0x10000000, 0x10000000); | ||
340 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
341 | kfree(info); | ||
203 | } | 342 | } |
204 | |||
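calc_clk() above prefers a plain divider off the VCO when one lands close enough to the target: sdiv = (vco * 2) / (khz - 2999), clamped to 65, accepted when sdiv > 4 and the result falls within [-2000, 3000) kHz of the request. A worked standalone example (the 810 MHz VCO and 270 MHz target are hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sclk = 810000;		/* hypothetical VCO, kHz */
	uint32_t khz  = 270000;		/* requested clock, kHz */
	uint32_t sdiv = (sclk * 2) / (khz - 2999);

	if (sdiv > 65)
		sdiv = 65;

	/* mirror the sdiv > 4 / diff window check in calc_clk() */
	if (sdiv > 4) {
		uint32_t oclk = (sclk * 2) / sdiv;
		int diff = (int)khz - (int)oclk;

		if (diff >= -2000 && diff < 3000)
			printf("divider %u -> %u kHz (diff %d)\n",
			       sdiv, oclk, diff);
	}
	return 0;
}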
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 08e6b118f021..5bf55038fd92 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -32,6 +32,30 @@ struct nvc0_fb_priv { | |||
32 | dma_addr_t r100c10; | 32 | dma_addr_t r100c10; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | static inline void | ||
36 | nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp) | ||
37 | { | ||
38 | u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400); | ||
39 | u32 stat = nv_rd32(dev, subp_base + 0x020); | ||
40 | |||
41 | if (stat) { | ||
42 | NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat); | ||
43 | nv_wr32(dev, subp_base + 0x020, stat); | ||
44 | } | ||
45 | } | ||
46 | |||
47 | static void | ||
48 | nvc0_mfb_isr(struct drm_device *dev) | ||
49 | { | ||
50 | u32 units = nv_rd32(dev, 0x00017c); | ||
51 | while (units) { | ||
52 | u32 subp, unit = ffs(units) - 1; | ||
53 | for (subp = 0; subp < 2; subp++) | ||
54 | nvc0_mfb_subp_isr(dev, unit, subp); | ||
55 | units &= ~(1 << unit); | ||
56 | } | ||
57 | } | ||
58 | |||
35 | static void | 59 | static void |
36 | nvc0_fb_destroy(struct drm_device *dev) | 60 | nvc0_fb_destroy(struct drm_device *dev) |
37 | { | 61 | { |
@@ -39,6 +63,8 @@ nvc0_fb_destroy(struct drm_device *dev) | |||
39 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | 63 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
40 | struct nvc0_fb_priv *priv = pfb->priv; | 64 | struct nvc0_fb_priv *priv = pfb->priv; |
41 | 65 | ||
66 | nouveau_irq_unregister(dev, 25); | ||
67 | |||
42 | if (priv->r100c10_page) { | 68 | if (priv->r100c10_page) { |
43 | pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE, | 69 | pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE, |
44 | PCI_DMA_BIDIRECTIONAL); | 70 | PCI_DMA_BIDIRECTIONAL); |
@@ -74,6 +100,7 @@ nvc0_fb_create(struct drm_device *dev) | |||
74 | return -EFAULT; | 100 | return -EFAULT; |
75 | } | 101 | } |
76 | 102 | ||
103 | nouveau_irq_register(dev, 25, nvc0_mfb_isr); | ||
77 | return 0; | 104 | return 0; |
78 | } | 105 | } |
79 | 106 | ||
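nvc0_mfb_isr() above services units by walking a status bitmask with ffs(), clearing each bit as it goes. The loop shape, standalone (the unit mask is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint32_t units = 0x00000015;	/* hypothetical: units 0, 2 and 4 */

	while (units) {
		uint32_t unit = ffs(units) - 1;

		printf("service PMFB%u\n", unit);
		units &= ~(1u << unit);
	}
	return 0;
}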
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 6f9f341c3e86..dcbe0d5d0241 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -322,7 +322,7 @@ nvc0_fifo_init(struct drm_device *dev) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | /* PSUBFIFO[n] */ | 324 | /* PSUBFIFO[n] */ |
325 | for (i = 0; i < 3; i++) { | 325 | for (i = 0; i < priv->spoon_nr; i++) { |
326 | nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); | 326 | nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); |
327 | nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ | 327 | nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ |
328 | nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */ | 328 | nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */ |
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5b2f6f420468..4b8d0b3f7d2b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -390,7 +390,7 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) | |||
390 | } | 390 | } |
391 | 391 | ||
392 | nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); | 392 | nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); |
393 | nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); | 393 | nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800)); |
394 | } | 394 | } |
395 | 395 | ||
396 | static void | 396 | static void |
@@ -700,22 +700,6 @@ nvc0_graph_isr(struct drm_device *dev) | |||
700 | nv_wr32(dev, 0x400500, 0x00010001); | 700 | nv_wr32(dev, 0x400500, 0x00010001); |
701 | } | 701 | } |
702 | 702 | ||
703 | static void | ||
704 | nvc0_runk140_isr(struct drm_device *dev) | ||
705 | { | ||
706 | u32 units = nv_rd32(dev, 0x00017c) & 0x1f; | ||
707 | |||
708 | while (units) { | ||
709 | u32 unit = ffs(units) - 1; | ||
710 | u32 reg = 0x140000 + unit * 0x2000; | ||
711 | u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); | ||
712 | u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); | ||
713 | |||
714 | NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); | ||
715 | units &= ~(1 << unit); | ||
716 | } | ||
717 | } | ||
718 | |||
719 | static int | 703 | static int |
720 | nvc0_graph_create_fw(struct drm_device *dev, const char *fwname, | 704 | nvc0_graph_create_fw(struct drm_device *dev, const char *fwname, |
721 | struct nvc0_graph_fuc *fuc) | 705 | struct nvc0_graph_fuc *fuc) |
@@ -764,7 +748,6 @@ nvc0_graph_destroy(struct drm_device *dev, int engine) | |||
764 | } | 748 | } |
765 | 749 | ||
766 | nouveau_irq_unregister(dev, 12); | 750 | nouveau_irq_unregister(dev, 12); |
767 | nouveau_irq_unregister(dev, 25); | ||
768 | 751 | ||
769 | nouveau_gpuobj_ref(NULL, &priv->unk4188b8); | 752 | nouveau_gpuobj_ref(NULL, &priv->unk4188b8); |
770 | nouveau_gpuobj_ref(NULL, &priv->unk4188b4); | 753 | nouveau_gpuobj_ref(NULL, &priv->unk4188b4); |
@@ -803,7 +786,6 @@ nvc0_graph_create(struct drm_device *dev) | |||
803 | 786 | ||
804 | NVOBJ_ENGINE_ADD(dev, GR, &priv->base); | 787 | NVOBJ_ENGINE_ADD(dev, GR, &priv->base); |
805 | nouveau_irq_register(dev, 12, nvc0_graph_isr); | 788 | nouveau_irq_register(dev, 12, nvc0_graph_isr); |
806 | nouveau_irq_register(dev, 25, nvc0_runk140_isr); | ||
807 | 789 | ||
808 | if (nouveau_ctxfw) { | 790 | if (nouveau_ctxfw) { |
809 | NV_INFO(dev, "PGRAPH: using external firmware\n"); | 791 | NV_INFO(dev, "PGRAPH: using external firmware\n"); |
@@ -864,6 +846,9 @@ nvc0_graph_create(struct drm_device *dev) | |||
864 | case 0xce: /* 4/4/0/0, 4 */ | 846 | case 0xce: /* 4/4/0/0, 4 */ |
865 | priv->magic_not_rop_nr = 0x03; | 847 | priv->magic_not_rop_nr = 0x03; |
866 | break; | 848 | break; |
849 | case 0xcf: /* 4/0/0/0, 3 */ | ||
850 | priv->magic_not_rop_nr = 0x03; | ||
851 | break; | ||
867 | } | 852 | } |
868 | 853 | ||
869 | if (!priv->magic_not_rop_nr) { | 854 | if (!priv->magic_not_rop_nr) { |
@@ -889,20 +874,3 @@ error: | |||
889 | nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR); | 874 | nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR); |
890 | return ret; | 875 | return ret; |
891 | } | 876 | } |
892 | |||
893 | MODULE_FIRMWARE("nouveau/nvc0_fuc409c"); | ||
894 | MODULE_FIRMWARE("nouveau/nvc0_fuc409d"); | ||
895 | MODULE_FIRMWARE("nouveau/nvc0_fuc41ac"); | ||
896 | MODULE_FIRMWARE("nouveau/nvc0_fuc41ad"); | ||
897 | MODULE_FIRMWARE("nouveau/nvc3_fuc409c"); | ||
898 | MODULE_FIRMWARE("nouveau/nvc3_fuc409d"); | ||
899 | MODULE_FIRMWARE("nouveau/nvc3_fuc41ac"); | ||
900 | MODULE_FIRMWARE("nouveau/nvc3_fuc41ad"); | ||
901 | MODULE_FIRMWARE("nouveau/nvc4_fuc409c"); | ||
902 | MODULE_FIRMWARE("nouveau/nvc4_fuc409d"); | ||
903 | MODULE_FIRMWARE("nouveau/nvc4_fuc41ac"); | ||
904 | MODULE_FIRMWARE("nouveau/nvc4_fuc41ad"); | ||
905 | MODULE_FIRMWARE("nouveau/fuc409c"); | ||
906 | MODULE_FIRMWARE("nouveau/fuc409d"); | ||
907 | MODULE_FIRMWARE("nouveau/fuc41ac"); | ||
908 | MODULE_FIRMWARE("nouveau/fuc41ad"); | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 55689e997286..636fe9812f79 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -82,6 +82,7 @@ nvc0_graph_class(struct drm_device *dev) | |||
82 | case 0xc3: | 82 | case 0xc3: |
83 | case 0xc4: | 83 | case 0xc4: |
84 | case 0xce: /* guess, mmio trace shows only 0x9097 state */ | 84 | case 0xce: /* guess, mmio trace shows only 0x9097 state */ |
85 | case 0xcf: /* guess, mmio trace shows only 0x9097 state */ | ||
85 | return 0x9097; | 86 | return 0x9097; |
86 | case 0xc1: | 87 | case 0xc1: |
87 | return 0x9197; | 88 | return 0x9197; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 31018eaf5279..dd0e6a736b3b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1678,7 +1678,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev) | |||
1678 | nv_wr32(dev, 0x419c04, 0x00000006); | 1678 | nv_wr32(dev, 0x419c04, 0x00000006); |
1679 | nv_wr32(dev, 0x419c08, 0x00000002); | 1679 | nv_wr32(dev, 0x419c08, 0x00000002); |
1680 | nv_wr32(dev, 0x419c20, 0x00000000); | 1680 | nv_wr32(dev, 0x419c20, 0x00000000); |
1681 | nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048 | 1681 | if (chipset == 0xce || chipset == 0xcf) |
1682 | nv_wr32(dev, 0x419cb0, 0x00020048); | ||
1683 | else | ||
1684 | nv_wr32(dev, 0x419cb0, 0x00060048); | ||
1682 | nv_wr32(dev, 0x419ce8, 0x00000000); | 1685 | nv_wr32(dev, 0x419ce8, 0x00000000); |
1683 | nv_wr32(dev, 0x419cf4, 0x00000183); | 1686 | nv_wr32(dev, 0x419cf4, 0x00000183); |
1684 | nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000); | 1687 | nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000); |
@@ -1783,11 +1786,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1783 | nv_wr32(dev, 0x40587c, 0x00000000); | 1786 | nv_wr32(dev, 0x40587c, 0x00000000); |
1784 | 1787 | ||
1785 | if (1) { | 1788 | if (1) { |
1786 | const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0, | 1789 | u8 tpnr[GPC_MAX], data[TP_MAX]; |
1787 | 16, 0, 0, 0, 0, 0, 8, 0 }; | ||
1788 | u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; | ||
1789 | u8 tpnr[GPC_MAX]; | ||
1790 | u8 data[TP_MAX]; | ||
1791 | 1790 | ||
1792 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | 1791 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); |
1793 | memset(data, 0x1f, sizeof(data)); | 1792 | memset(data, 0x1f, sizeof(data)); |
@@ -1801,7 +1800,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1801 | data[tp] = gpc; | 1800 | data[tp] = gpc; |
1802 | } | 1801 | } |
1803 | 1802 | ||
1804 | for (i = 0; i < max / 4; i++) | 1803 | for (i = 0; i < 4; i++) |
1805 | nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]); | 1804 | nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]); |
1806 | } | 1805 | } |
1807 | 1806 | ||
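The hunk above drops the per-chipset chipset_tp_max table and always writes four words at 0x4060a8. Those words carry the TP-to-GPC routing table built just before them: one byte per TP, assigned round-robin across the GPCs that still have TPs, padded with 0x1f. A self-contained sketch of that packing (per-GPC TP counts invented, four GPCs assumed for brevity):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t tp_nr[4] = { 2, 2, 0, 0 };   /* per-GPC TP counts: invented */
    uint8_t data[16];                    /* one byte per TP; 16 bytes is
                                          * implied by the four-word write */
    unsigned tp_total = tp_nr[0] + tp_nr[1] + tp_nr[2] + tp_nr[3];
    unsigned gpc = (unsigned)-1;

    memset(data, 0x1f, sizeof(data));    /* 0x1f = unused slot */
    for (unsigned tp = 0; tp < tp_total; tp++) {
        do {
            gpc = (gpc + 1) % 4;         /* next GPC that still has TPs */
        } while (!tp_nr[gpc]);
        tp_nr[gpc]--;
        data[tp] = gpc;
    }

    for (unsigned i = 0; i < 4; i++) {
        uint32_t w;
        memcpy(&w, &data[i * 4], 4);     /* the words written at 0x4060a8 */
        printf("0x4060a8 + 0x%x: 0x%08x\n", i * 4, (unsigned)w);
    }
    return 0;
}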
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc index 0ec2add72a76..06f5e26d1e0f 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc | |||
@@ -77,6 +77,11 @@ chipsets: | |||
77 | .b16 nvc0_gpc_mmio_tail | 77 | .b16 nvc0_gpc_mmio_tail |
78 | .b16 nvc0_tpc_mmio_head | 78 | .b16 nvc0_tpc_mmio_head |
79 | .b16 nvc3_tpc_mmio_tail | 79 | .b16 nvc3_tpc_mmio_tail |
80 | .b8 0xcf 0 0 0 | ||
81 | .b16 nvc0_gpc_mmio_head | ||
82 | .b16 nvc0_gpc_mmio_tail | ||
83 | .b16 nvc0_tpc_mmio_head | ||
84 | .b16 nvcf_tpc_mmio_tail | ||
80 | .b8 0 0 0 0 | 85 | .b8 0 0 0 0 |
81 | 86 | ||
82 | // GPC mmio lists | 87 | // GPC mmio lists |
@@ -134,8 +139,9 @@ mmctx_data(0x000750, 2) | |||
134 | nvc0_tpc_mmio_tail: | 139 | nvc0_tpc_mmio_tail: |
135 | mmctx_data(0x000758, 1) | 140 | mmctx_data(0x000758, 1) |
136 | mmctx_data(0x0002c4, 1) | 141 | mmctx_data(0x0002c4, 1) |
137 | mmctx_data(0x0004bc, 1) | ||
138 | mmctx_data(0x0006e0, 1) | 142 | mmctx_data(0x0006e0, 1) |
143 | nvcf_tpc_mmio_tail: | ||
144 | mmctx_data(0x0004bc, 1) | ||
139 | nvc3_tpc_mmio_tail: | 145 | nvc3_tpc_mmio_tail: |
140 | mmctx_data(0x000544, 1) | 146 | mmctx_data(0x000544, 1) |
141 | nvc1_tpc_mmio_tail: | 147 | nvc1_tpc_mmio_tail: |
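Each entry in the chipsets table this hunk extends is a 4-byte chipset id followed by 16-bit head/tail offsets into the GPC and TPC mmio lists, with an all-zero id terminating the table. Restated as a C struct and lookup, illustrative only; the offset values come from the generated 0xcf entry in the matching .fuc.h below:

#include <stdio.h>
#include <stdint.h>

struct chipset_entry {
    uint32_t chipset;       /* .b8 0xcf 0 0 0 */
    uint16_t gpc_mmio_head; /* .b16 nvc0_gpc_mmio_head */
    uint16_t gpc_mmio_tail; /* .b16 nvc0_gpc_mmio_tail */
    uint16_t tpc_mmio_head; /* .b16 nvc0_tpc_mmio_head */
    uint16_t tpc_mmio_tail; /* .b16 nvcf_tpc_mmio_tail */
};

static const struct chipset_entry *
find_chipset(const struct chipset_entry *tab, uint32_t chipset)
{
    for (; tab->chipset; tab++)     /* .b8 0 0 0 0 terminates the table */
        if (tab->chipset == chipset)
            return tab;
    return NULL;
}

int main(void)
{
    static const struct chipset_entry tab[] = {
        { 0xcf, 0x00bc, 0x011c, 0x0120, 0x017c },
        { 0 },
    };
    printf("0xcf found: %s\n", find_chipset(tab, 0xcf) ? "yes" : "no");
    return 0;
}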
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h index 1896c898f5ba..6f820324480e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h | |||
@@ -25,23 +25,26 @@ uint32_t nvc0_grgpc_data[] = { | |||
25 | 0x00000000, | 25 | 0x00000000, |
26 | 0x00000000, | 26 | 0x00000000, |
27 | 0x000000c0, | 27 | 0x000000c0, |
28 | 0x011000b0, | 28 | 0x011c00bc, |
29 | 0x01640114, | 29 | 0x01700120, |
30 | 0x000000c1, | 30 | 0x000000c1, |
31 | 0x011400b0, | 31 | 0x012000bc, |
32 | 0x01780114, | 32 | 0x01840120, |
33 | 0x000000c3, | 33 | 0x000000c3, |
34 | 0x011000b0, | 34 | 0x011c00bc, |
35 | 0x01740114, | 35 | 0x01800120, |
36 | 0x000000c4, | 36 | 0x000000c4, |
37 | 0x011000b0, | 37 | 0x011c00bc, |
38 | 0x01740114, | 38 | 0x01800120, |
39 | 0x000000c8, | 39 | 0x000000c8, |
40 | 0x011000b0, | 40 | 0x011c00bc, |
41 | 0x01640114, | 41 | 0x01700120, |
42 | 0x000000ce, | 42 | 0x000000ce, |
43 | 0x011000b0, | 43 | 0x011c00bc, |
44 | 0x01740114, | 44 | 0x01800120, |
45 | 0x000000cf, | ||
46 | 0x011c00bc, | ||
47 | 0x017c0120, | ||
45 | 0x00000000, | 48 | 0x00000000, |
46 | 0x00000380, | 49 | 0x00000380, |
47 | 0x14000400, | 50 | 0x14000400, |
@@ -90,8 +93,8 @@ uint32_t nvc0_grgpc_data[] = { | |||
90 | 0x04000750, | 93 | 0x04000750, |
91 | 0x00000758, | 94 | 0x00000758, |
92 | 0x000002c4, | 95 | 0x000002c4, |
93 | 0x000004bc, | ||
94 | 0x000006e0, | 96 | 0x000006e0, |
97 | 0x000004bc, | ||
95 | 0x00000544, | 98 | 0x00000544, |
96 | }; | 99 | }; |
97 | 100 | ||
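The regenerated arrays above track the .fuc source changes: the new 12-byte 0xcf chipset entry shifts every subsequent offset by 0xc (0x011000b0 becomes 0x011c00bc, and so on), and moving mmctx_data(0x0004bc, 1) reorders the tpc list. The list words themselves appear to encode mmctx_data(addr, n) as ((n - 1) << 26) | addr; that assumption matches the visible pairs, e.g. mmctx_data(0x000750, 2) against 0x04000750. A small decoder under that assumption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    static const uint32_t words[] = { 0x04000750, 0x00000758, 0x000002c4,
                                      0x000006e0, 0x000004bc, 0x00000544 };
    for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
        uint32_t addr  = words[i] & 0x03ffffff;   /* low 26 bits: register */
        uint32_t count = (words[i] >> 26) + 1;    /* high bits: n - 1 */
        printf("mmctx_data(0x%06x, %u)\n", (unsigned)addr, (unsigned)count);
    }
    return 0;
}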
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc index a1a599124cf4..e4f8c7e89ddd 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc | |||
@@ -56,6 +56,9 @@ chipsets: | |||
56 | .b8 0xce 0 0 0 | 56 | .b8 0xce 0 0 0 |
57 | .b16 nvc0_hub_mmio_head | 57 | .b16 nvc0_hub_mmio_head |
58 | .b16 nvc0_hub_mmio_tail | 58 | .b16 nvc0_hub_mmio_tail |
59 | .b8 0xcf 0 0 0 | ||
60 | .b16 nvc0_hub_mmio_head | ||
61 | .b16 nvc0_hub_mmio_tail | ||
59 | .b8 0 0 0 0 | 62 | .b8 0 0 0 0 |
60 | 63 | ||
61 | nvc0_hub_mmio_head: | 64 | nvc0_hub_mmio_head: |
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h index b3b541b6d044..241d3263f1e5 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h | |||
@@ -23,17 +23,19 @@ uint32_t nvc0_grhub_data[] = { | |||
23 | 0x00000000, | 23 | 0x00000000, |
24 | 0x00000000, | 24 | 0x00000000, |
25 | 0x000000c0, | 25 | 0x000000c0, |
26 | 0x012c0090, | 26 | 0x01340098, |
27 | 0x000000c1, | 27 | 0x000000c1, |
28 | 0x01300090, | 28 | 0x01380098, |
29 | 0x000000c3, | 29 | 0x000000c3, |
30 | 0x012c0090, | 30 | 0x01340098, |
31 | 0x000000c4, | 31 | 0x000000c4, |
32 | 0x012c0090, | 32 | 0x01340098, |
33 | 0x000000c8, | 33 | 0x000000c8, |
34 | 0x012c0090, | 34 | 0x01340098, |
35 | 0x000000ce, | 35 | 0x000000ce, |
36 | 0x012c0090, | 36 | 0x01340098, |
37 | 0x000000cf, | ||
38 | 0x01340098, | ||
37 | 0x00000000, | 39 | 0x00000000, |
38 | 0x0417e91c, | 40 | 0x0417e91c, |
39 | 0x04400204, | 41 | 0x04400204, |
@@ -190,8 +192,6 @@ uint32_t nvc0_grhub_data[] = { | |||
190 | 0x00000000, | 192 | 0x00000000, |
191 | 0x00000000, | 193 | 0x00000000, |
192 | 0x00000000, | 194 | 0x00000000, |
193 | 0x00000000, | ||
194 | 0x00000000, | ||
195 | }; | 195 | }; |
196 | 196 | ||
197 | uint32_t nvc0_grhub_code[] = { | 197 | uint32_t nvc0_grhub_code[] = { |
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c new file mode 100644 index 000000000000..929aded35cb5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_pm.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_bios.h" | ||
28 | #include "nouveau_pm.h" | ||
29 | |||
30 | static u32 read_div(struct drm_device *, int, u32, u32); | ||
31 | static u32 read_pll(struct drm_device *, u32); | ||
32 | |||
33 | static u32 | ||
34 | read_vco(struct drm_device *dev, u32 dsrc) | ||
35 | { | ||
36 | u32 ssrc = nv_rd32(dev, dsrc); | ||
37 | if (!(ssrc & 0x00000100)) | ||
38 | return read_pll(dev, 0x00e800); | ||
39 | return read_pll(dev, 0x00e820); | ||
40 | } | ||
41 | |||
42 | static u32 | ||
43 | read_pll(struct drm_device *dev, u32 pll) | ||
44 | { | ||
45 | u32 ctrl = nv_rd32(dev, pll + 0); | ||
46 | u32 coef = nv_rd32(dev, pll + 4); | ||
47 | u32 P = (coef & 0x003f0000) >> 16; | ||
48 | u32 N = (coef & 0x0000ff00) >> 8; | ||
49 | u32 M = (coef & 0x000000ff) >> 0; | ||
50 | u32 sclk, doff; | ||
51 | |||
52 | if (!(ctrl & 0x00000001)) | ||
53 | return 0; | ||
54 | |||
55 | switch (pll & 0xfff000) { | ||
56 | case 0x00e000: | ||
57 | sclk = 27000; | ||
58 | P = 1; | ||
59 | break; | ||
60 | case 0x137000: | ||
61 | doff = (pll - 0x137000) / 0x20; | ||
62 | sclk = read_div(dev, doff, 0x137120, 0x137140); | ||
63 | break; | ||
64 | case 0x132000: | ||
65 | switch (pll) { | ||
66 | case 0x132000: | ||
67 | sclk = read_pll(dev, 0x132020); | ||
68 | break; | ||
69 | case 0x132020: | ||
70 | sclk = read_div(dev, 0, 0x137320, 0x137330); | ||
71 | break; | ||
72 | default: | ||
73 | return 0; | ||
74 | } | ||
75 | break; | ||
76 | default: | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | return sclk * N / M / P; | ||
81 | } | ||
82 | |||
83 | static u32 | ||
84 | read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl) | ||
85 | { | ||
86 | u32 ssrc = nv_rd32(dev, dsrc + (doff * 4)); | ||
87 | u32 sctl = nv_rd32(dev, dctl + (doff * 4)); | ||
88 | |||
89 | switch (ssrc & 0x00000003) { | ||
90 | case 0: | ||
91 | if ((ssrc & 0x00030000) != 0x00030000) | ||
92 | return 27000; | ||
93 | return 108000; | ||
94 | case 2: | ||
95 | return 100000; | ||
96 | case 3: | ||
97 | if (sctl & 0x80000000) { | ||
98 | u32 sclk = read_vco(dev, dsrc + (doff * 4)); | ||
99 | u32 sdiv = (sctl & 0x0000003f) + 2; | ||
100 | return (sclk * 2) / sdiv; | ||
101 | } | ||
102 | |||
103 | return read_vco(dev, dsrc + (doff * 4)); | ||
104 | default: | ||
105 | return 0; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | static u32 | ||
110 | read_mem(struct drm_device *dev) | ||
111 | { | ||
112 | u32 ssel = nv_rd32(dev, 0x1373f0); | ||
113 | if (ssel & 0x00000001) | ||
114 | return read_div(dev, 0, 0x137300, 0x137310); | ||
115 | return read_pll(dev, 0x132000); | ||
116 | } | ||
117 | |||
118 | static u32 | ||
119 | read_clk(struct drm_device *dev, int clk) | ||
120 | { | ||
121 | u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4)); | ||
122 | u32 ssel = nv_rd32(dev, 0x137100); | ||
123 | u32 sclk, sdiv; | ||
124 | |||
125 | if (ssel & (1 << clk)) { | ||
126 | if (clk < 7) | ||
127 | sclk = read_pll(dev, 0x137000 + (clk * 0x20)); | ||
128 | else | ||
129 | sclk = read_pll(dev, 0x1370e0); | ||
130 | sdiv = ((sctl & 0x00003f00) >> 8) + 2; | ||
131 | } else { | ||
132 | sclk = read_div(dev, clk, 0x137160, 0x1371d0); | ||
133 | sdiv = ((sctl & 0x0000003f) >> 0) + 2; | ||
134 | } | ||
135 | |||
136 | if (sctl & 0x80000000) | ||
137 | return (sclk * 2) / sdiv; | ||
138 | return sclk; | ||
139 | } | ||
140 | |||
141 | int | ||
142 | nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | ||
143 | { | ||
144 | perflvl->shader = read_clk(dev, 0x00); | ||
145 | perflvl->core = perflvl->shader / 2; | ||
146 | perflvl->memory = read_mem(dev); | ||
147 | perflvl->rop = read_clk(dev, 0x01); | ||
148 | perflvl->hub07 = read_clk(dev, 0x02); | ||
149 | perflvl->hub06 = read_clk(dev, 0x07); | ||
150 | perflvl->hub01 = read_clk(dev, 0x08); | ||
151 | perflvl->copy = read_clk(dev, 0x09); | ||
152 | perflvl->daemon = read_clk(dev, 0x0c); | ||
153 | perflvl->vdec = read_clk(dev, 0x0e); | ||
154 | return 0; | ||
155 | } | ||
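read_pll() above computes sclk * N / M / P from the coefficient register, with P in bits 21:16, N in 15:8 and M in 7:0; the reference sclk depends on which PLL is read (27000 kHz for the 0x00e000 pair, where the code also forces P to 1). A worked example with an invented coefficient value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t coef = 0x00010a02;               /* invented: P=1, N=10, M=2 */
    uint32_t P = (coef & 0x003f0000) >> 16;
    uint32_t N = (coef & 0x0000ff00) >> 8;
    uint32_t M = (coef & 0x000000ff) >> 0;
    uint32_t sclk = 27000;                    /* kHz, crystal reference */

    printf("%u kHz\n", (unsigned)(sclk * N / M / P));  /* 135000 kHz */
    return 0;
}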
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index e45a24d84e98..edbfe9360ae2 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c | |||
@@ -61,7 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin, | |||
61 | u32 type, struct nouveau_mem **pmem) | 61 | u32 type, struct nouveau_mem **pmem) |
62 | { | 62 | { |
63 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 63 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
64 | struct nouveau_mm *mm = dev_priv->engine.vram.mm; | 64 | struct nouveau_mm *mm = &dev_priv->engine.vram.mm; |
65 | struct nouveau_mm_node *r; | 65 | struct nouveau_mm_node *r; |
66 | struct nouveau_mem *mem; | 66 | struct nouveau_mem *mem; |
67 | int ret; | 67 | int ret; |
@@ -106,12 +106,50 @@ nvc0_vram_init(struct drm_device *dev) | |||
106 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | 106 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; |
107 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | 107 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ |
108 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | 108 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ |
109 | u32 length; | 109 | u32 parts = nv_rd32(dev, 0x121c74); |
110 | u32 bsize = nv_rd32(dev, 0x10f20c); | ||
111 | u32 offset, length; | ||
112 | bool uniform = true; | ||
113 | int ret, i; | ||
110 | 114 | ||
111 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; | 115 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); |
112 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); | 116 | NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); |
113 | 117 | ||
114 | length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail; | 118 | /* read amount of vram attached to each memory controller */ |
119 | for (i = 0; i < parts; i++) { | ||
120 | u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); | ||
121 | if (psize != bsize) { | ||
122 | if (psize < bsize) | ||
123 | bsize = psize; | ||
124 | uniform = false; | ||
125 | } | ||
126 | |||
127 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); | ||
128 | |||
129 | dev_priv->vram_size += (u64)psize << 20; | ||
130 | } | ||
131 | |||
132 | /* if all controllers have the same amount attached, there are no holes */ | ||
133 | if (uniform) { | ||
134 | offset = rsvd_head; | ||
135 | length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail; | ||
136 | return nouveau_mm_init(&vram->mm, offset, length, 1); | ||
137 | } | ||
115 | 138 | ||
116 | return nouveau_mm_init(&vram->mm, rsvd_head, length, 1); | 139 | /* otherwise, address lowest common amount from 0GiB */ |
140 | ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1); | ||
141 | if (ret) | ||
142 | return ret; | ||
143 | |||
144 | /* and the rest starting from (8GiB + common_size) */ | ||
145 | offset = (0x0200000000ULL >> 12) + (bsize << 8); | ||
146 | length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail; | ||
147 | |||
148 | ret = nouveau_mm_init(&vram->mm, offset, length, 0); | ||
149 | if (ret) { | ||
150 | nouveau_mm_fini(&vram->mm); | ||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | return 0; | ||
117 | } | 155 | } |
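The rewritten nvc0_vram_init() above copes with memory controllers carrying unequal amounts of VRAM: the lowest common amount per controller is addressed linearly from zero, and the remainder is addressed from 8 GiB plus that common size, leaving the hole in between unmapped. A worked example of the page arithmetic, all quantities in 4 KiB pages (hence the >>12 and <<8 shifts: 1 MiB = 1 << 8 pages); the partition sizes are invented and the formulas restate the driver's verbatim:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t rsvd_head = (256 * 1024) >> 12;   /* vga memory */
    const uint32_t rsvd_tail = (1024 * 1024) >> 12;  /* vbios etc */
    uint32_t psize[2] = { 1024, 768 };  /* MiB per controller: invented */
    uint32_t parts = 2, bsize = psize[0];
    uint64_t vram_size = 0;

    for (uint32_t i = 0; i < parts; i++) {
        if (psize[i] < bsize)
            bsize = psize[i];           /* lowest common amount */
        vram_size += (uint64_t)psize[i] << 20;
    }

    /* region 1: the common amount on every controller, mapped from zero */
    printf("low:  offset %u, %u pages\n", rsvd_head, (bsize << 8) * parts);

    /* region 2: the remainder, addressed from 8GiB + common_size */
    printf("high: offset %llu, %llu pages\n",
           (unsigned long long)((0x0200000000ULL >> 12) + (bsize << 8)),
           (unsigned long long)((vram_size >> 12) - (bsize << 8) - rsvd_tail));
    return 0;
}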
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c new file mode 100644 index 000000000000..23d63b4b3d77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvd0_display.c | |||
@@ -0,0 +1,1473 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include <linux/dma-mapping.h> | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm_crtc_helper.h" | ||
29 | |||
30 | #include "nouveau_drv.h" | ||
31 | #include "nouveau_connector.h" | ||
32 | #include "nouveau_encoder.h" | ||
33 | #include "nouveau_crtc.h" | ||
34 | #include "nouveau_dma.h" | ||
35 | #include "nouveau_fb.h" | ||
36 | #include "nv50_display.h" | ||
37 | |||
38 | struct nvd0_display { | ||
39 | struct nouveau_gpuobj *mem; | ||
40 | struct { | ||
41 | dma_addr_t handle; | ||
42 | u32 *ptr; | ||
43 | } evo[1]; | ||
44 | |||
45 | struct tasklet_struct tasklet; | ||
46 | u32 modeset; | ||
47 | }; | ||
48 | |||
49 | static struct nvd0_display * | ||
50 | nvd0_display(struct drm_device *dev) | ||
51 | { | ||
52 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
53 | return dev_priv->engine.display.priv; | ||
54 | } | ||
55 | |||
56 | static inline int | ||
57 | evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data) | ||
58 | { | ||
59 | int ret = 0; | ||
60 | nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001); | ||
61 | nv_wr32(dev, 0x610704 + (id * 0x10), data); | ||
62 | nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd); | ||
63 | if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000)) | ||
64 | ret = -EBUSY; | ||
65 | nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000); | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | static u32 * | ||
70 | evo_wait(struct drm_device *dev, int id, int nr) | ||
71 | { | ||
72 | struct nvd0_display *disp = nvd0_display(dev); | ||
73 | u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4; | ||
74 | |||
75 | if (put + nr >= (PAGE_SIZE / 4)) { | ||
76 | disp->evo[id].ptr[put] = 0x20000000; | ||
77 | |||
78 | nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000); | ||
79 | if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) { | ||
80 | NV_ERROR(dev, "evo %d dma stalled\n", id); | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | put = 0; | ||
85 | } | ||
86 | |||
87 | return disp->evo[id].ptr + put; | ||
88 | } | ||
89 | |||
90 | static void | ||
91 | evo_kick(u32 *push, struct drm_device *dev, int id) | ||
92 | { | ||
93 | struct nvd0_display *disp = nvd0_display(dev); | ||
94 | nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2); | ||
95 | } | ||
96 | |||
97 | #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) | ||
98 | #define evo_data(p,d) *((p)++) = (d) | ||
99 | |||
100 | static struct drm_crtc * | ||
101 | nvd0_display_crtc_get(struct drm_encoder *encoder) | ||
102 | { | ||
103 | return nouveau_encoder(encoder)->crtc; | ||
104 | } | ||
105 | |||
106 | /****************************************************************************** | ||
107 | * CRTC | ||
108 | *****************************************************************************/ | ||
109 | static int | ||
110 | nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update) | ||
111 | { | ||
112 | struct drm_device *dev = nv_crtc->base.dev; | ||
113 | u32 *push, mode; | ||
114 | |||
115 | mode = 0x00000000; | ||
116 | if (on) { | ||
117 | /* 0x11: 6bpc dynamic 2x2 | ||
118 | * 0x13: 8bpc dynamic 2x2 | ||
119 | * 0x19: 6bpc static 2x2 | ||
120 | * 0x1b: 8bpc static 2x2 | ||
121 | * 0x21: 6bpc temporal | ||
122 | * 0x23: 8bpc temporal | ||
123 | */ | ||
124 | mode = 0x00000011; | ||
125 | } | ||
126 | |||
127 | push = evo_wait(dev, 0, 4); | ||
128 | if (push) { | ||
129 | evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1); | ||
130 | evo_data(push, mode); | ||
131 | if (update) { | ||
132 | evo_mthd(push, 0x0080, 1); | ||
133 | evo_data(push, 0x00000000); | ||
134 | } | ||
135 | evo_kick(push, dev, 0); | ||
136 | } | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static int | ||
142 | nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update) | ||
143 | { | ||
144 | struct drm_display_mode *mode = &nv_crtc->base.mode; | ||
145 | struct drm_device *dev = nv_crtc->base.dev; | ||
146 | struct nouveau_connector *nv_connector; | ||
147 | u32 *push, outX, outY; | ||
148 | |||
149 | outX = mode->hdisplay; | ||
150 | outY = mode->vdisplay; | ||
151 | |||
152 | nv_connector = nouveau_crtc_connector_get(nv_crtc); | ||
153 | if (nv_connector && nv_connector->native_mode) { | ||
154 | struct drm_display_mode *native = nv_connector->native_mode; | ||
155 | u32 xratio = (native->hdisplay << 19) / mode->hdisplay; | ||
156 | u32 yratio = (native->vdisplay << 19) / mode->vdisplay; | ||
157 | |||
158 | switch (type) { | ||
159 | case DRM_MODE_SCALE_ASPECT: | ||
160 | if (xratio > yratio) { | ||
161 | outX = (mode->hdisplay * yratio) >> 19; | ||
162 | outY = (mode->vdisplay * yratio) >> 19; | ||
163 | } else { | ||
164 | outX = (mode->hdisplay * xratio) >> 19; | ||
165 | outY = (mode->vdisplay * xratio) >> 19; | ||
166 | } | ||
167 | break; | ||
168 | case DRM_MODE_SCALE_FULLSCREEN: | ||
169 | outX = native->hdisplay; | ||
170 | outY = native->vdisplay; | ||
171 | break; | ||
172 | default: | ||
173 | break; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | push = evo_wait(dev, 0, 16); | ||
178 | if (push) { | ||
179 | evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); | ||
180 | evo_data(push, (outY << 16) | outX); | ||
181 | evo_data(push, (outY << 16) | outX); | ||
182 | evo_data(push, (outY << 16) | outX); | ||
183 | evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); | ||
184 | evo_data(push, 0x00000000); | ||
185 | evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); | ||
186 | evo_data(push, (mode->vdisplay << 16) | mode->hdisplay); | ||
187 | if (update) { | ||
188 | evo_mthd(push, 0x0080, 1); | ||
189 | evo_data(push, 0x00000000); | ||
190 | } | ||
191 | evo_kick(push, dev, 0); | ||
192 | } | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int | ||
198 | nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, | ||
199 | int x, int y, bool update) | ||
200 | { | ||
201 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); | ||
202 | u32 *push; | ||
203 | |||
204 | push = evo_wait(fb->dev, 0, 16); | ||
205 | if (push) { | ||
206 | evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); | ||
207 | evo_data(push, nvfb->nvbo->bo.offset >> 8); | ||
208 | evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); | ||
209 | evo_data(push, (fb->height << 16) | fb->width); | ||
210 | evo_data(push, nvfb->r_pitch); | ||
211 | evo_data(push, nvfb->r_format); | ||
212 | evo_data(push, nvfb->r_dma); | ||
213 | evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); | ||
214 | evo_data(push, (y << 16) | x); | ||
215 | if (update) { | ||
216 | evo_mthd(push, 0x0080, 1); | ||
217 | evo_data(push, 0x00000000); | ||
218 | } | ||
219 | evo_kick(push, fb->dev, 0); | ||
220 | } | ||
221 | |||
222 | nv_crtc->fb.tile_flags = nvfb->r_dma; | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static void | ||
227 | nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update) | ||
228 | { | ||
229 | struct drm_device *dev = nv_crtc->base.dev; | ||
230 | u32 *push = evo_wait(dev, 0, 16); | ||
231 | if (push) { | ||
232 | if (show) { | ||
233 | evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); | ||
234 | evo_data(push, 0x85000000); | ||
235 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); | ||
236 | evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); | ||
237 | evo_data(push, NvEvoVRAM); | ||
238 | } else { | ||
239 | evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); | ||
240 | evo_data(push, 0x05000000); | ||
241 | evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); | ||
242 | evo_data(push, 0x00000000); | ||
243 | } | ||
244 | |||
245 | if (update) { | ||
246 | evo_mthd(push, 0x0080, 1); | ||
247 | evo_data(push, 0x00000000); | ||
248 | } | ||
249 | |||
250 | evo_kick(push, dev, 0); | ||
251 | } | ||
252 | } | ||
253 | |||
254 | static void | ||
255 | nvd0_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
256 | { | ||
257 | } | ||
258 | |||
259 | static void | ||
260 | nvd0_crtc_prepare(struct drm_crtc *crtc) | ||
261 | { | ||
262 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
263 | u32 *push; | ||
264 | |||
265 | push = evo_wait(crtc->dev, 0, 2); | ||
266 | if (push) { | ||
267 | evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); | ||
268 | evo_data(push, 0x00000000); | ||
269 | evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1); | ||
270 | evo_data(push, 0x03000000); | ||
271 | evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); | ||
272 | evo_data(push, 0x00000000); | ||
273 | evo_kick(push, crtc->dev, 0); | ||
274 | } | ||
275 | |||
276 | nvd0_crtc_cursor_show(nv_crtc, false, false); | ||
277 | } | ||
278 | |||
279 | static void | ||
280 | nvd0_crtc_commit(struct drm_crtc *crtc) | ||
281 | { | ||
282 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
283 | u32 *push; | ||
284 | |||
285 | push = evo_wait(crtc->dev, 0, 32); | ||
286 | if (push) { | ||
287 | evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); | ||
288 | evo_data(push, nv_crtc->fb.tile_flags); | ||
289 | evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); | ||
290 | evo_data(push, 0x83000000); | ||
291 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); | ||
292 | evo_data(push, 0x00000000); | ||
293 | evo_data(push, 0x00000000); | ||
294 | evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); | ||
295 | evo_data(push, NvEvoVRAM); | ||
296 | evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); | ||
297 | evo_data(push, 0xffffff00); | ||
298 | evo_kick(push, crtc->dev, 0); | ||
299 | } | ||
300 | |||
301 | nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true); | ||
302 | } | ||
303 | |||
304 | static bool | ||
305 | nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||
306 | struct drm_display_mode *adjusted_mode) | ||
307 | { | ||
308 | return true; | ||
309 | } | ||
310 | |||
311 | static int | ||
312 | nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | ||
313 | { | ||
314 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | ||
315 | int ret; | ||
316 | |||
317 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | ||
318 | if (ret) | ||
319 | return ret; | ||
320 | |||
321 | if (old_fb) { | ||
322 | nvfb = nouveau_framebuffer(old_fb); | ||
323 | nouveau_bo_unpin(nvfb->nvbo); | ||
324 | } | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static int | ||
330 | nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, | ||
331 | struct drm_display_mode *mode, int x, int y, | ||
332 | struct drm_framebuffer *old_fb) | ||
333 | { | ||
334 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
335 | struct nouveau_connector *nv_connector; | ||
336 | u32 htotal = mode->htotal; | ||
337 | u32 vtotal = mode->vtotal; | ||
338 | u32 hsyncw = mode->hsync_end - mode->hsync_start - 1; | ||
339 | u32 vsyncw = mode->vsync_end - mode->vsync_start - 1; | ||
340 | u32 hfrntp = mode->hsync_start - mode->hdisplay; | ||
341 | u32 vfrntp = mode->vsync_start - mode->vdisplay; | ||
342 | u32 hbackp = mode->htotal - mode->hsync_end; | ||
343 | u32 vbackp = mode->vtotal - mode->vsync_end; | ||
344 | u32 hss2be = hsyncw + hbackp; | ||
345 | u32 vss2be = vsyncw + vbackp; | ||
346 | u32 hss2de = htotal - hfrntp; | ||
347 | u32 vss2de = vtotal - vfrntp; | ||
348 | u32 syncs, *push; | ||
349 | int ret; | ||
350 | |||
351 | syncs = 0x00000001; | ||
352 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
353 | syncs |= 0x00000008; | ||
354 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
355 | syncs |= 0x00000010; | ||
356 | |||
357 | ret = nvd0_crtc_swap_fbs(crtc, old_fb); | ||
358 | if (ret) | ||
359 | return ret; | ||
360 | |||
361 | push = evo_wait(crtc->dev, 0, 64); | ||
362 | if (push) { | ||
363 | evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5); | ||
364 | evo_data(push, 0x00000000); | ||
365 | evo_data(push, (vtotal << 16) | htotal); | ||
366 | evo_data(push, (vsyncw << 16) | hsyncw); | ||
367 | evo_data(push, (vss2be << 16) | hss2be); | ||
368 | evo_data(push, (vss2de << 16) | hss2de); | ||
369 | evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); | ||
370 | evo_data(push, 0x00000000); /* ??? */ | ||
371 | evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); | ||
372 | evo_data(push, mode->clock * 1000); | ||
373 | evo_data(push, 0x00200000); /* ??? */ | ||
374 | evo_data(push, mode->clock * 1000); | ||
375 | evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1); | ||
376 | evo_data(push, syncs); | ||
377 | evo_kick(push, crtc->dev, 0); | ||
378 | } | ||
379 | |||
380 | nv_connector = nouveau_crtc_connector_get(nv_crtc); | ||
381 | nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false); | ||
382 | nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false); | ||
383 | nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false); | ||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | static int | ||
388 | nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | ||
389 | struct drm_framebuffer *old_fb) | ||
390 | { | ||
391 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
392 | int ret; | ||
393 | |||
394 | if (!crtc->fb) { | ||
395 | NV_DEBUG_KMS(crtc->dev, "No FB bound\n"); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | ret = nvd0_crtc_swap_fbs(crtc, old_fb); | ||
400 | if (ret) | ||
401 | return ret; | ||
402 | |||
403 | nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true); | ||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static int | ||
408 | nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc, | ||
409 | struct drm_framebuffer *fb, int x, int y, | ||
410 | enum mode_set_atomic state) | ||
411 | { | ||
412 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
413 | nvd0_crtc_set_image(nv_crtc, fb, x, y, true); | ||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | static void | ||
418 | nvd0_crtc_lut_load(struct drm_crtc *crtc) | ||
419 | { | ||
420 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
421 | void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); | ||
422 | int i; | ||
423 | |||
424 | for (i = 0; i < 256; i++) { | ||
425 | writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0); | ||
426 | writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2); | ||
427 | writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | static int | ||
432 | nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
433 | uint32_t handle, uint32_t width, uint32_t height) | ||
434 | { | ||
435 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
436 | struct drm_device *dev = crtc->dev; | ||
437 | struct drm_gem_object *gem; | ||
438 | struct nouveau_bo *nvbo; | ||
439 | bool visible = (handle != 0); | ||
440 | int i, ret = 0; | ||
441 | |||
442 | if (visible) { | ||
443 | if (width != 64 || height != 64) | ||
444 | return -EINVAL; | ||
445 | |||
446 | gem = drm_gem_object_lookup(dev, file_priv, handle); | ||
447 | if (unlikely(!gem)) | ||
448 | return -ENOENT; | ||
449 | nvbo = nouveau_gem_object(gem); | ||
450 | |||
451 | ret = nouveau_bo_map(nvbo); | ||
452 | if (ret == 0) { | ||
453 | for (i = 0; i < 64 * 64; i++) { | ||
454 | u32 v = nouveau_bo_rd32(nvbo, i); | ||
455 | nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); | ||
456 | } | ||
457 | nouveau_bo_unmap(nvbo); | ||
458 | } | ||
459 | |||
460 | drm_gem_object_unreference_unlocked(gem); | ||
461 | } | ||
462 | |||
463 | if (visible != nv_crtc->cursor.visible) { | ||
464 | nvd0_crtc_cursor_show(nv_crtc, visible, true); | ||
465 | nv_crtc->cursor.visible = visible; | ||
466 | } | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static int | ||
472 | nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | ||
473 | { | ||
474 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
475 | const u32 data = (y << 16) | x; | ||
476 | |||
477 | nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data); | ||
478 | nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000); | ||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | static void | ||
483 | nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, | ||
484 | uint32_t start, uint32_t size) | ||
485 | { | ||
486 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
486 | u32 end = min(start + size, (u32)256); /* clamp: lut arrays hold 256 entries */ | ||
488 | u32 i; | ||
489 | |||
490 | for (i = start; i < end; i++) { | ||
491 | nv_crtc->lut.r[i] = r[i]; | ||
492 | nv_crtc->lut.g[i] = g[i]; | ||
493 | nv_crtc->lut.b[i] = b[i]; | ||
494 | } | ||
495 | |||
496 | nvd0_crtc_lut_load(crtc); | ||
497 | } | ||
498 | |||
499 | static void | ||
500 | nvd0_crtc_destroy(struct drm_crtc *crtc) | ||
501 | { | ||
502 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
503 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | ||
504 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | ||
505 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | ||
506 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | ||
507 | drm_crtc_cleanup(crtc); | ||
508 | kfree(crtc); | ||
509 | } | ||
510 | |||
511 | static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = { | ||
512 | .dpms = nvd0_crtc_dpms, | ||
513 | .prepare = nvd0_crtc_prepare, | ||
514 | .commit = nvd0_crtc_commit, | ||
515 | .mode_fixup = nvd0_crtc_mode_fixup, | ||
516 | .mode_set = nvd0_crtc_mode_set, | ||
517 | .mode_set_base = nvd0_crtc_mode_set_base, | ||
518 | .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic, | ||
519 | .load_lut = nvd0_crtc_lut_load, | ||
520 | }; | ||
521 | |||
522 | static const struct drm_crtc_funcs nvd0_crtc_func = { | ||
523 | .cursor_set = nvd0_crtc_cursor_set, | ||
524 | .cursor_move = nvd0_crtc_cursor_move, | ||
525 | .gamma_set = nvd0_crtc_gamma_set, | ||
526 | .set_config = drm_crtc_helper_set_config, | ||
527 | .destroy = nvd0_crtc_destroy, | ||
528 | }; | ||
529 | |||
530 | static void | ||
531 | nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | ||
532 | { | ||
533 | } | ||
534 | |||
535 | static void | ||
536 | nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) | ||
537 | { | ||
538 | } | ||
539 | |||
540 | static int | ||
541 | nvd0_crtc_create(struct drm_device *dev, int index) | ||
542 | { | ||
543 | struct nouveau_crtc *nv_crtc; | ||
544 | struct drm_crtc *crtc; | ||
545 | int ret, i; | ||
546 | |||
547 | nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); | ||
548 | if (!nv_crtc) | ||
549 | return -ENOMEM; | ||
550 | |||
551 | nv_crtc->index = index; | ||
552 | nv_crtc->set_dither = nvd0_crtc_set_dither; | ||
553 | nv_crtc->set_scale = nvd0_crtc_set_scale; | ||
554 | nv_crtc->cursor.set_offset = nvd0_cursor_set_offset; | ||
555 | nv_crtc->cursor.set_pos = nvd0_cursor_set_pos; | ||
556 | for (i = 0; i < 256; i++) { | ||
557 | nv_crtc->lut.r[i] = i << 8; | ||
558 | nv_crtc->lut.g[i] = i << 8; | ||
559 | nv_crtc->lut.b[i] = i << 8; | ||
560 | } | ||
561 | |||
562 | crtc = &nv_crtc->base; | ||
563 | drm_crtc_init(dev, crtc, &nvd0_crtc_func); | ||
564 | drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc); | ||
565 | drm_mode_crtc_set_gamma_size(crtc, 256); | ||
566 | |||
567 | ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, | ||
568 | 0, 0x0000, &nv_crtc->cursor.nvbo); | ||
569 | if (!ret) { | ||
570 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); | ||
571 | if (!ret) | ||
572 | ret = nouveau_bo_map(nv_crtc->cursor.nvbo); | ||
573 | if (ret) | ||
574 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | ||
575 | } | ||
576 | |||
577 | if (ret) | ||
578 | goto out; | ||
579 | |||
580 | ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, | ||
581 | 0, 0x0000, &nv_crtc->lut.nvbo); | ||
582 | if (!ret) { | ||
583 | ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); | ||
584 | if (!ret) | ||
585 | ret = nouveau_bo_map(nv_crtc->lut.nvbo); | ||
586 | if (ret) | ||
587 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | ||
588 | } | ||
589 | |||
590 | if (ret) | ||
591 | goto out; | ||
592 | |||
593 | nvd0_crtc_lut_load(crtc); | ||
594 | |||
595 | out: | ||
596 | if (ret) | ||
597 | nvd0_crtc_destroy(crtc); | ||
598 | return ret; | ||
599 | } | ||
600 | |||
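All of the CRTC code above talks to the display engine through the EVO push buffer: evo_mthd() writes a header word packing the dword count into bits 18 and up with the method offset in the low bits (see the macros near the top of this file), evo_data() appends payload dwords, and evo_kick() advances the hardware put pointer. A self-contained sketch of that encoding, with a plain array standing in for the DMA push buffer:

#include <stdio.h>
#include <stdint.h>

#define EVO_MTHD(p, m, s) (*((p)++) = (((uint32_t)(s) << 18) | (m)))
#define EVO_DATA(p, d)    (*((p)++) = (d))

int main(void)
{
    uint32_t pushbuf[8], *push = pushbuf;
    int index = 0;                                /* crtc index */

    EVO_MTHD(push, 0x0490 + (index * 0x300), 1);  /* dither method... */
    EVO_DATA(push, 0x00000011);                   /* ...6bpc dynamic 2x2 */
    EVO_MTHD(push, 0x0080, 1);                    /* update request */
    EVO_DATA(push, 0x00000000);

    for (uint32_t *p = pushbuf; p < push; p++)    /* what evo_kick submits */
        printf("0x%08x\n", (unsigned)*p);
    return 0;
}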
601 | /****************************************************************************** | ||
602 | * DAC | ||
603 | *****************************************************************************/ | ||
604 | static void | ||
605 | nvd0_dac_dpms(struct drm_encoder *encoder, int mode) | ||
606 | { | ||
607 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
608 | struct drm_device *dev = encoder->dev; | ||
609 | int or = nv_encoder->or; | ||
610 | u32 dpms_ctrl; | ||
611 | |||
612 | dpms_ctrl = 0x80000000; | ||
613 | if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) | ||
614 | dpms_ctrl |= 0x00000001; | ||
615 | if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) | ||
616 | dpms_ctrl |= 0x00000004; | ||
617 | |||
618 | nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); | ||
619 | nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl); | ||
620 | nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); | ||
621 | } | ||
622 | |||
623 | static bool | ||
624 | nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
625 | struct drm_display_mode *adjusted_mode) | ||
626 | { | ||
627 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
628 | struct nouveau_connector *nv_connector; | ||
629 | |||
630 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
631 | if (nv_connector && nv_connector->native_mode) { | ||
632 | if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { | ||
633 | int id = adjusted_mode->base.id; | ||
634 | *adjusted_mode = *nv_connector->native_mode; | ||
635 | adjusted_mode->base.id = id; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | return true; | ||
640 | } | ||
641 | |||
642 | static void | ||
643 | nvd0_dac_prepare(struct drm_encoder *encoder) | ||
644 | { | ||
645 | } | ||
646 | |||
647 | static void | ||
648 | nvd0_dac_commit(struct drm_encoder *encoder) | ||
649 | { | ||
650 | } | ||
651 | |||
652 | static void | ||
653 | nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
654 | struct drm_display_mode *adjusted_mode) | ||
655 | { | ||
656 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
657 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | ||
658 | u32 *push; | ||
659 | |||
660 | nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); | ||
661 | |||
662 | push = evo_wait(encoder->dev, 0, 4); | ||
663 | if (push) { | ||
664 | evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2); | ||
665 | evo_data(push, 1 << nv_crtc->index); | ||
666 | evo_data(push, 0x00ff); | ||
667 | evo_kick(push, encoder->dev, 0); | ||
668 | } | ||
669 | |||
670 | nv_encoder->crtc = encoder->crtc; | ||
671 | } | ||
672 | |||
673 | static void | ||
674 | nvd0_dac_disconnect(struct drm_encoder *encoder) | ||
675 | { | ||
676 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
677 | struct drm_device *dev = encoder->dev; | ||
678 | u32 *push; | ||
679 | |||
680 | if (nv_encoder->crtc) { | ||
681 | nvd0_crtc_prepare(nv_encoder->crtc); | ||
682 | |||
683 | push = evo_wait(dev, 0, 4); | ||
684 | if (push) { | ||
685 | evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1); | ||
686 | evo_data(push, 0x00000000); | ||
687 | evo_mthd(push, 0x0080, 1); | ||
688 | evo_data(push, 0x00000000); | ||
689 | evo_kick(push, dev, 0); | ||
690 | } | ||
691 | |||
692 | nv_encoder->crtc = NULL; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | static enum drm_connector_status | ||
697 | nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
698 | { | ||
699 | enum drm_connector_status status = connector_status_disconnected; | ||
700 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
701 | struct drm_device *dev = encoder->dev; | ||
702 | int or = nv_encoder->or; | ||
703 | u32 load; | ||
704 | |||
705 | nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000); | ||
706 | udelay(9500); | ||
707 | nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000); | ||
708 | |||
709 | load = nv_rd32(dev, 0x61a00c + (or * 0x800)); | ||
710 | if ((load & 0x38000000) == 0x38000000) | ||
711 | status = connector_status_connected; | ||
712 | |||
713 | nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000); | ||
714 | return status; | ||
715 | } | ||
716 | |||
717 | static void | ||
718 | nvd0_dac_destroy(struct drm_encoder *encoder) | ||
719 | { | ||
720 | drm_encoder_cleanup(encoder); | ||
721 | kfree(encoder); | ||
722 | } | ||
723 | |||
724 | static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = { | ||
725 | .dpms = nvd0_dac_dpms, | ||
726 | .mode_fixup = nvd0_dac_mode_fixup, | ||
727 | .prepare = nvd0_dac_prepare, | ||
728 | .commit = nvd0_dac_commit, | ||
729 | .mode_set = nvd0_dac_mode_set, | ||
730 | .disable = nvd0_dac_disconnect, | ||
731 | .get_crtc = nvd0_display_crtc_get, | ||
732 | .detect = nvd0_dac_detect | ||
733 | }; | ||
734 | |||
735 | static const struct drm_encoder_funcs nvd0_dac_func = { | ||
736 | .destroy = nvd0_dac_destroy, | ||
737 | }; | ||
738 | |||
739 | static int | ||
740 | nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe) | ||
741 | { | ||
742 | struct drm_device *dev = connector->dev; | ||
743 | struct nouveau_encoder *nv_encoder; | ||
744 | struct drm_encoder *encoder; | ||
745 | |||
746 | nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); | ||
747 | if (!nv_encoder) | ||
748 | return -ENOMEM; | ||
749 | nv_encoder->dcb = dcbe; | ||
750 | nv_encoder->or = ffs(dcbe->or) - 1; | ||
751 | |||
752 | encoder = to_drm_encoder(nv_encoder); | ||
753 | encoder->possible_crtcs = dcbe->heads; | ||
754 | encoder->possible_clones = 0; | ||
755 | drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC); | ||
756 | drm_encoder_helper_add(encoder, &nvd0_dac_hfunc); | ||
757 | |||
758 | drm_mode_connector_attach_encoder(connector, encoder); | ||
759 | return 0; | ||
760 | } | ||
761 | |||
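nvd0_dac_detect() above senses a monitor by driving the DAC load-detect circuit, waiting, and reading the result back: the connector is reported connected only when all three bits in 0x38000000 (presumably the three colour-channel loads) come back set. The decision restated on simulated register values:

#include <stdio.h>
#include <stdint.h>

static int dac_connected(uint32_t load)
{
    return (load & 0x38000000) == 0x38000000;  /* all three loads present */
}

int main(void)
{
    printf("%d\n", dac_connected(0x38000000)); /* 1: all channels loaded */
    printf("%d\n", dac_connected(0x08000000)); /* 0: partial load only */
    return 0;
}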
762 | /****************************************************************************** | ||
763 | * SOR | ||
764 | *****************************************************************************/ | ||
765 | static void | ||
766 | nvd0_sor_dpms(struct drm_encoder *encoder, int mode) | ||
767 | { | ||
768 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
769 | struct drm_device *dev = encoder->dev; | ||
770 | struct drm_encoder *partner; | ||
771 | int or = nv_encoder->or; | ||
772 | u32 dpms_ctrl; | ||
773 | |||
774 | nv_encoder->last_dpms = mode; | ||
775 | |||
776 | list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { | ||
777 | struct nouveau_encoder *nv_partner = nouveau_encoder(partner); | ||
778 | |||
779 | if (partner->encoder_type != DRM_MODE_ENCODER_TMDS) | ||
780 | continue; | ||
781 | |||
782 | if (nv_partner != nv_encoder && | ||
783 | nv_partner->dcb->or == nv_encoder->or) { | ||
784 | if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) | ||
785 | return; | ||
786 | break; | ||
787 | } | ||
788 | } | ||
789 | |||
790 | dpms_ctrl = (mode == DRM_MODE_DPMS_ON); | ||
791 | dpms_ctrl |= 0x80000000; | ||
792 | |||
793 | nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); | ||
794 | nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl); | ||
795 | nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); | ||
796 | nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000); | ||
797 | } | ||
798 | |||
799 | static bool | ||
800 | nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
801 | struct drm_display_mode *adjusted_mode) | ||
802 | { | ||
803 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
804 | struct nouveau_connector *nv_connector; | ||
805 | |||
806 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
807 | if (nv_connector && nv_connector->native_mode) { | ||
808 | if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { | ||
809 | int id = adjusted_mode->base.id; | ||
810 | *adjusted_mode = *nv_connector->native_mode; | ||
811 | adjusted_mode->base.id = id; | ||
812 | } | ||
813 | } | ||
814 | |||
815 | return true; | ||
816 | } | ||
817 | |||
818 | static void | ||
819 | nvd0_sor_prepare(struct drm_encoder *encoder) | ||
820 | { | ||
821 | } | ||
822 | |||
823 | static void | ||
824 | nvd0_sor_commit(struct drm_encoder *encoder) | ||
825 | { | ||
826 | } | ||
827 | |||
828 | static void | ||
829 | nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | ||
830 | struct drm_display_mode *mode) | ||
831 | { | ||
832 | struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; | ||
833 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
834 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | ||
835 | struct nouveau_connector *nv_connector; | ||
836 | struct nvbios *bios = &dev_priv->vbios; | ||
837 | u32 mode_ctrl = (1 << nv_crtc->index); | ||
838 | u32 *push, or_config; | ||
839 | |||
840 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
841 | switch (nv_encoder->dcb->type) { | ||
842 | case OUTPUT_TMDS: | ||
843 | if (nv_encoder->dcb->sorconf.link & 1) { | ||
844 | if (mode->clock < 165000) | ||
845 | mode_ctrl |= 0x00000100; | ||
846 | else | ||
847 | mode_ctrl |= 0x00000500; | ||
848 | } else { | ||
849 | mode_ctrl |= 0x00000200; | ||
850 | } | ||
851 | |||
852 | or_config = (mode_ctrl & 0x00000f00) >> 8; | ||
853 | if (mode->clock >= 165000) | ||
854 | or_config |= 0x0100; | ||
855 | break; | ||
856 | case OUTPUT_LVDS: | ||
857 | or_config = (mode_ctrl & 0x00000f00) >> 8; | ||
858 | if (bios->fp_no_ddc) { | ||
859 | if (bios->fp.dual_link) | ||
860 | or_config |= 0x0100; | ||
861 | if (bios->fp.if_is_24bit) | ||
862 | or_config |= 0x0200; | ||
863 | } else { | ||
864 | if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) { | ||
865 | if (((u8 *)nv_connector->edid)[121] == 2) | ||
866 | or_config |= 0x0100; | ||
867 | } else | ||
868 | if (mode->clock >= bios->fp.duallink_transition_clk) { | ||
869 | or_config |= 0x0100; | ||
870 | } | ||
871 | |||
872 | if (or_config & 0x0100) { | ||
873 | if (bios->fp.strapless_is_24bit & 2) | ||
874 | or_config |= 0x0200; | ||
875 | } else { | ||
876 | if (bios->fp.strapless_is_24bit & 1) | ||
877 | or_config |= 0x0200; | ||
878 | } | ||
879 | |||
880 | if (nv_connector->base.display_info.bpc == 8) | ||
881 | or_config |= 0x0200; | ||
882 | |||
883 | } | ||
884 | break; | ||
885 | default: | ||
886 | BUG_ON(1); | ||
887 | break; | ||
888 | } | ||
889 | |||
890 | nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); | ||
891 | |||
892 | push = evo_wait(encoder->dev, 0, 4); | ||
893 | if (push) { | ||
894 | evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2); | ||
895 | evo_data(push, mode_ctrl); | ||
896 | evo_data(push, or_config); | ||
897 | evo_kick(push, encoder->dev, 0); | ||
898 | } | ||
899 | |||
900 | nv_encoder->crtc = encoder->crtc; | ||
901 | } | ||
902 | |||
903 | static void | ||
904 | nvd0_sor_disconnect(struct drm_encoder *encoder) | ||
905 | { | ||
906 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
907 | struct drm_device *dev = encoder->dev; | ||
908 | u32 *push; | ||
909 | |||
910 | if (nv_encoder->crtc) { | ||
911 | nvd0_crtc_prepare(nv_encoder->crtc); | ||
912 | |||
913 | push = evo_wait(dev, 0, 4); | ||
914 | if (push) { | ||
915 | evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); | ||
916 | evo_data(push, 0x00000000); | ||
917 | evo_mthd(push, 0x0080, 1); | ||
918 | evo_data(push, 0x00000000); | ||
919 | evo_kick(push, dev, 0); | ||
920 | } | ||
921 | |||
922 | nv_encoder->crtc = NULL; | ||
923 | nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; | ||
924 | } | ||
925 | } | ||
926 | |||
927 | static void | ||
928 | nvd0_sor_destroy(struct drm_encoder *encoder) | ||
929 | { | ||
930 | drm_encoder_cleanup(encoder); | ||
931 | kfree(encoder); | ||
932 | } | ||
933 | |||
934 | static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = { | ||
935 | .dpms = nvd0_sor_dpms, | ||
936 | .mode_fixup = nvd0_sor_mode_fixup, | ||
937 | .prepare = nvd0_sor_prepare, | ||
938 | .commit = nvd0_sor_commit, | ||
939 | .mode_set = nvd0_sor_mode_set, | ||
940 | .disable = nvd0_sor_disconnect, | ||
941 | .get_crtc = nvd0_display_crtc_get, | ||
942 | }; | ||
943 | |||
944 | static const struct drm_encoder_funcs nvd0_sor_func = { | ||
945 | .destroy = nvd0_sor_destroy, | ||
946 | }; | ||
947 | |||
948 | static int | ||
949 | nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe) | ||
950 | { | ||
951 | struct drm_device *dev = connector->dev; | ||
952 | struct nouveau_encoder *nv_encoder; | ||
953 | struct drm_encoder *encoder; | ||
954 | |||
955 | nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); | ||
956 | if (!nv_encoder) | ||
957 | return -ENOMEM; | ||
958 | nv_encoder->dcb = dcbe; | ||
959 | nv_encoder->or = ffs(dcbe->or) - 1; | ||
960 | nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; | ||
961 | |||
962 | encoder = to_drm_encoder(nv_encoder); | ||
963 | encoder->possible_crtcs = dcbe->heads; | ||
964 | encoder->possible_clones = 0; | ||
965 | drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS); | ||
966 | drm_encoder_helper_add(encoder, &nvd0_sor_hfunc); | ||
967 | |||
968 | drm_mode_connector_attach_encoder(connector, encoder); | ||
969 | return 0; | ||
970 | } | ||
971 | |||
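For TMDS, nvd0_sor_mode_set() above derives the control words from the DCB link and the pixel clock: link A takes 0x100 (single link) or 0x500 (dual link, at and above 165 MHz), link B takes 0x200, and or_config mirrors those bits shifted down, with 0x0100 added for dual-link rates. A standalone restatement:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int link_a = 1;                 /* dcb->sorconf.link & 1 */
    int clock = 268500;             /* kHz; e.g. 2560x1600@60, dual link */
    uint32_t mode_ctrl = 0, or_config;

    if (link_a)
        mode_ctrl |= (clock < 165000) ? 0x00000100 : 0x00000500;
    else
        mode_ctrl |= 0x00000200;

    or_config = (mode_ctrl & 0x00000f00) >> 8;
    if (clock >= 165000)
        or_config |= 0x0100;        /* dual-link */

    printf("mode_ctrl 0x%08x or_config 0x%04x\n", mode_ctrl, or_config);
    return 0;
}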
972 | /****************************************************************************** | ||
973 | * IRQ | ||
974 | *****************************************************************************/ | ||
975 | static struct dcb_entry * | ||
976 | lookup_dcb(struct drm_device *dev, int id, u32 mc) | ||
977 | { | ||
978 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
979 | int type, or, i; | ||
980 | |||
981 | if (id < 4) { | ||
982 | type = OUTPUT_ANALOG; | ||
983 | or = id; | ||
984 | } else { | ||
985 | switch (mc & 0x00000f00) { | ||
986 | case 0x00000000: type = OUTPUT_LVDS; break; | ||
987 | case 0x00000100: type = OUTPUT_TMDS; break; | ||
988 | case 0x00000200: type = OUTPUT_TMDS; break; | ||
989 | case 0x00000500: type = OUTPUT_TMDS; break; | ||
990 | default: | ||
991 | NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc); | ||
992 | return NULL; | ||
993 | } | ||
994 | |||
995 | or = id - 4; | ||
996 | } | ||
997 | |||
998 | for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { | ||
999 | struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; | ||
1000 | if (dcb->type == type && (dcb->or & (1 << or))) | ||
1001 | return dcb; | ||
1002 | } | ||
1003 | |||
1004 | NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc); | ||
1005 | return NULL; | ||
1006 | } | ||
1007 | |||
1008 | static void | ||
1009 | nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask) | ||
1010 | { | ||
1011 | struct dcb_entry *dcb; | ||
1012 | int i; | ||
1013 | |||
1014 | for (i = 0; mask && i < 8; i++) { | ||
1015 | u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); | ||
1016 | if (!(mcc & (1 << crtc))) | ||
1017 | continue; | ||
1018 | |||
1019 | dcb = lookup_dcb(dev, i, mcc); | ||
1020 | if (!dcb) | ||
1021 | continue; | ||
1022 | |||
1023 | nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc); | ||
1024 | } | ||
1025 | |||
1026 | nv_wr32(dev, 0x6101d4, 0x00000000); | ||
1027 | nv_wr32(dev, 0x6109d4, 0x00000000); | ||
1028 | nv_wr32(dev, 0x6101d0, 0x80000000); | ||
1029 | } | ||
1030 | |||
1031 | static void | ||
1032 | nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) | ||
1033 | { | ||
1034 | struct dcb_entry *dcb; | ||
1035 | u32 or, tmp, pclk; | ||
1036 | int i; | ||
1037 | |||
1038 | for (i = 0; mask && i < 8; i++) { | ||
1039 | u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); | ||
1040 | if (!(mcc & (1 << crtc))) | ||
1041 | continue; | ||
1042 | |||
1043 | dcb = lookup_dcb(dev, i, mcc); | ||
1044 | if (!dcb) | ||
1045 | continue; | ||
1046 | |||
1047 | nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc); | ||
1048 | } | ||
1049 | |||
1050 | pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; | ||
1051 | if (mask & 0x00010000) { | ||
1052 | nv50_crtc_set_clock(dev, crtc, pclk); | ||
1053 | } | ||
1054 | |||
1055 | for (i = 0; mask && i < 8; i++) { | ||
1056 | u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); | ||
1057 | u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); | ||
1058 | if (!(mcp & (1 << crtc))) | ||
1059 | continue; | ||
1060 | |||
1061 | dcb = lookup_dcb(dev, i, mcp); | ||
1062 | if (!dcb) | ||
1063 | continue; | ||
1064 | or = ffs(dcb->or) - 1; | ||
1065 | |||
1066 | nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc); | ||
1067 | |||
1068 | nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000); | ||
1069 | switch (dcb->type) { | ||
1070 | case OUTPUT_ANALOG: | ||
1071 | nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000); | ||
1072 | break; | ||
1073 | case OUTPUT_TMDS: | ||
1074 | case OUTPUT_LVDS: | ||
1075 | if (cfg & 0x00000100) | ||
1076 | tmp = 0x00000101; | ||
1077 | else | ||
1078 | tmp = 0x00000000; | ||
1079 | |||
1080 | nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp); | ||
1081 | break; | ||
1082 | default: | ||
1083 | break; | ||
1084 | } | ||
1085 | |||
1086 | break; | ||
1087 | } | ||
1088 | |||
1089 | nv_wr32(dev, 0x6101d4, 0x00000000); | ||
1090 | nv_wr32(dev, 0x6109d4, 0x00000000); | ||
1091 | nv_wr32(dev, 0x6101d0, 0x80000000); | ||
1092 | } | ||
1093 | |||
1094 | static void | ||
1095 | nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask) | ||
1096 | { | ||
1097 | struct dcb_entry *dcb; | ||
1098 | int pclk, i; | ||
1099 | |||
1100 | pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; | ||
1101 | |||
1102 | for (i = 0; mask && i < 8; i++) { | ||
1103 | u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); | ||
1104 | u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); | ||
1105 | if (!(mcp & (1 << crtc))) | ||
1106 | continue; | ||
1107 | |||
1108 | dcb = lookup_dcb(dev, i, mcp); | ||
1109 | if (!dcb) | ||
1110 | continue; | ||
1111 | |||
1112 | nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc); | ||
1113 | } | ||
1114 | |||
1115 | nv_wr32(dev, 0x6101d4, 0x00000000); | ||
1116 | nv_wr32(dev, 0x6109d4, 0x00000000); | ||
1117 | nv_wr32(dev, 0x6101d0, 0x80000000); | ||
1118 | } | ||
1119 | |||
1120 | static void | ||
1121 | nvd0_display_bh(unsigned long data) | ||
1122 | { | ||
1123 | struct drm_device *dev = (struct drm_device *)data; | ||
1124 | struct nvd0_display *disp = nvd0_display(dev); | ||
1125 | u32 mask, crtc; | ||
1126 | int i; | ||
1127 | |||
1128 | if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { | ||
1129 | NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset); | ||
1130 | NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n", | ||
1131 | nv_rd32(dev, 0x6101d0), | ||
1132 | nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4)); | ||
1133 | for (i = 0; i < 8; i++) { | ||
1134 | NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n", | ||
1135 | i < 4 ? "DAC" : "SOR", i, | ||
1136 | nv_rd32(dev, 0x640180 + (i * 0x20)), | ||
1137 | nv_rd32(dev, 0x660180 + (i * 0x20))); | ||
1138 | } | ||
1139 | } | ||
1140 | |||
1141 | mask = nv_rd32(dev, 0x6101d4); | ||
1142 | crtc = 0; | ||
1143 | if (!mask) { | ||
1144 | mask = nv_rd32(dev, 0x6109d4); | ||
1145 | crtc = 1; | ||
1146 | } | ||
1147 | |||
1148 | if (disp->modeset & 0x00000001) | ||
1149 | nvd0_display_unk1_handler(dev, crtc, mask); | ||
1150 | if (disp->modeset & 0x00000002) | ||
1151 | nvd0_display_unk2_handler(dev, crtc, mask); | ||
1152 | if (disp->modeset & 0x00000004) | ||
1153 | nvd0_display_unk4_handler(dev, crtc, mask); | ||
1154 | } | ||
1155 | |||
1156 | static void | ||
1157 | nvd0_display_intr(struct drm_device *dev) | ||
1158 | { | ||
1159 | struct nvd0_display *disp = nvd0_display(dev); | ||
1160 | u32 intr = nv_rd32(dev, 0x610088); | ||
1161 | |||
1162 | if (intr & 0x00000002) { | ||
1163 | u32 stat = nv_rd32(dev, 0x61009c); | ||
1164 | int chid = ffs(stat) - 1; | ||
1165 | if (chid >= 0) { | ||
1166 | u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12)); | ||
1167 | u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12)); | ||
1168 | u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12)); | ||
1169 | |||
1170 | NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x " | ||
1171 | "0x%08x 0x%08x\n", | ||
1172 | chid, (mthd & 0x0000ffc), data, mthd, unkn); | ||
1173 | nv_wr32(dev, 0x61009c, (1 << chid)); | ||
1174 | nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000); | ||
1175 | } | ||
1176 | |||
1177 | intr &= ~0x00000002; | ||
1178 | } | ||
1179 | |||
1180 | if (intr & 0x00100000) { | ||
1181 | u32 stat = nv_rd32(dev, 0x6100ac); | ||
1182 | |||
1183 | if (stat & 0x00000007) { | ||
1184 | disp->modeset = stat; | ||
1185 | tasklet_schedule(&disp->tasklet); | ||
1186 | |||
1187 | nv_wr32(dev, 0x6100ac, (stat & 0x00000007)); | ||
1188 | stat &= ~0x00000007; | ||
1189 | } | ||
1190 | |||
1191 | if (stat) { | ||
1192 | NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat); | ||
1193 | nv_wr32(dev, 0x6100ac, stat); | ||
1194 | } | ||
1195 | |||
1196 | intr &= ~0x00100000; | ||
1197 | } | ||
1198 | |||
1199 | if (intr & 0x01000000) { | ||
1200 | u32 stat = nv_rd32(dev, 0x6100bc); | ||
1201 | nv_wr32(dev, 0x6100bc, stat); | ||
1202 | intr &= ~0x01000000; | ||
1203 | } | ||
1204 | |||
1205 | if (intr & 0x02000000) { | ||
1206 | u32 stat = nv_rd32(dev, 0x6108bc); | ||
1207 | nv_wr32(dev, 0x6108bc, stat); | ||
1208 | intr &= ~0x02000000; | ||
1209 | } | ||
1210 | |||
1211 | if (intr) | ||
1212 | NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr); | ||
1213 | } | ||
1214 | |||
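A note on the handler above: it uses the common peel-and-log idiom, where each recognised status bit is handled, acked in hardware, and cleared from a local copy, so whatever remains set at the end is genuinely unknown. A minimal userspace sketch of the idiom, with illustrative bit values rather than the real nvd0 register layout:

#include <stdint.h>
#include <stdio.h>

/* Minimal sketch of the peel-and-log interrupt idiom; the bit values
 * and handlers here are illustrative, not the actual nvd0 registers. */
static void display_intr_sketch(uint32_t intr)
{
	if (intr & 0x00000002) {
		/* handle (and hardware-ack) the EVO channel error here */
		intr &= ~0x00000002;            /* mark the bit as consumed */
	}

	if (intr & 0x00100000) {
		/* record the modeset request and schedule the bottom half */
		intr &= ~0x00100000;
	}

	if (intr)                               /* leftovers are a surprise */
		printf("unknown intr 0x%08x\n", intr);
}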
1215 | /****************************************************************************** | ||
1216 | * Init | ||
1217 | *****************************************************************************/ | ||
1218 | static void | ||
1219 | nvd0_display_fini(struct drm_device *dev) | ||
1220 | { | ||
1221 | int i; | ||
1222 | |||
1223 | /* fini cursors */ | ||
1224 | for (i = 14; i >= 13; i--) { | ||
1225 | if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001)) | ||
1226 | continue; | ||
1227 | |||
1228 | nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000); | ||
1229 | nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000); | ||
1230 | nv_mask(dev, 0x610090, 1 << i, 0x00000000); | ||
1231 | nv_mask(dev, 0x6100a0, 1 << i, 0x00000000); | ||
1232 | } | ||
1233 | |||
1234 | /* fini master */ | ||
1235 | if (nv_rd32(dev, 0x610490) & 0x00000010) { | ||
1236 | nv_mask(dev, 0x610490, 0x00000010, 0x00000000); | ||
1237 | nv_mask(dev, 0x610490, 0x00000003, 0x00000000); | ||
1238 | nv_wait(dev, 0x610490, 0x80000000, 0x00000000); | ||
1239 | nv_mask(dev, 0x610090, 0x00000001, 0x00000000); | ||
1240 | nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000); | ||
1241 | } | ||
1242 | } | ||
1243 | |||
1244 | int | ||
1245 | nvd0_display_init(struct drm_device *dev) | ||
1246 | { | ||
1247 | struct nvd0_display *disp = nvd0_display(dev); | ||
1248 | u32 *push; | ||
1249 | int i; | ||
1250 | |||
1251 | if (nv_rd32(dev, 0x6100ac) & 0x00000100) { | ||
1252 | nv_wr32(dev, 0x6100ac, 0x00000100); | ||
1253 | nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000); | ||
1254 | if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) { | ||
1255 | NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n", | ||
1256 | nv_rd32(dev, 0x6194e8)); | ||
1257 | return -EBUSY; | ||
1258 | } | ||
1259 | } | ||
1260 | |||
1261 | /* not sure what these are exactly; all that is known is that SOR_MODE_CTRL | ||
1262 | * won't work at all unless the SOR part below is done. | ||
1263 | */ | ||
1264 | for (i = 0; i < 3; i++) { | ||
1265 | u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800)); | ||
1266 | nv_wr32(dev, 0x6101c0 + (i * 0x800), dac); | ||
1267 | } | ||
1268 | |||
1269 | for (i = 0; i < 4; i++) { | ||
1270 | u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800)); | ||
1271 | nv_wr32(dev, 0x6301c4 + (i * 0x800), sor); | ||
1272 | } | ||
1273 | |||
1274 | for (i = 0; i < 2; i++) { | ||
1275 | u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800)); | ||
1276 | u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800)); | ||
1277 | u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800)); | ||
1278 | nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0); | ||
1279 | nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1); | ||
1280 | nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2); | ||
1281 | } | ||
1282 | |||
1283 | /* point at our hash table / objects, enable interrupts */ | ||
1284 | nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9); | ||
1285 | nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307); | ||
1286 | |||
1287 | /* init master */ | ||
1288 | nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3); | ||
1289 | nv_wr32(dev, 0x610498, 0x00010000); | ||
1290 | nv_wr32(dev, 0x61049c, 0x00000001); | ||
1291 | nv_mask(dev, 0x610490, 0x00000010, 0x00000010); | ||
1292 | nv_wr32(dev, 0x640000, 0x00000000); | ||
1293 | nv_wr32(dev, 0x610490, 0x01000013); | ||
1294 | if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) { | ||
1295 | NV_ERROR(dev, "PDISP: master 0x%08x\n", | ||
1296 | nv_rd32(dev, 0x610490)); | ||
1297 | return -EBUSY; | ||
1298 | } | ||
1299 | nv_mask(dev, 0x610090, 0x00000001, 0x00000001); | ||
1300 | nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001); | ||
1301 | |||
1302 | /* init cursors */ | ||
1303 | for (i = 13; i <= 14; i++) { | ||
1304 | nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001); | ||
1305 | if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) { | ||
1306 | NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i, | ||
1307 | nv_rd32(dev, 0x610490 + (i * 0x10))); | ||
1308 | return -EBUSY; | ||
1309 | } | ||
1310 | |||
1311 | nv_mask(dev, 0x610090, 1 << i, 1 << i); | ||
1312 | nv_mask(dev, 0x6100a0, 1 << i, 1 << i); | ||
1313 | } | ||
1314 | |||
1315 | push = evo_wait(dev, 0, 32); | ||
1316 | if (!push) | ||
1317 | return -EBUSY; | ||
1318 | evo_mthd(push, 0x0088, 1); | ||
1319 | evo_data(push, NvEvoSync); | ||
1320 | evo_mthd(push, 0x0084, 1); | ||
1321 | evo_data(push, 0x00000000); | ||
1322 | evo_mthd(push, 0x0084, 1); | ||
1323 | evo_data(push, 0x80000000); | ||
1324 | evo_mthd(push, 0x008c, 1); | ||
1325 | evo_data(push, 0x00000000); | ||
1326 | evo_kick(push, dev, 0); | ||
1327 | |||
1328 | return 0; | ||
1329 | } | ||
1330 | |||
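The tail of nvd0_display_init() shows the EVO submission pattern used throughout this file: evo_wait() reserves push-buffer space, evo_mthd() and evo_data() append a method header and its parameters, and evo_kick() submits the batch. A rough sketch of what append helpers of this kind do; the (count << 18) header layout is an assumption for illustration, not the documented EVO encoding:

#include <stdint.h>

/* Rough sketch of push-buffer append helpers in the style of
 * evo_mthd()/evo_data(); the header layout is a guess for illustration. */
static inline uint32_t *push_mthd(uint32_t *push, uint32_t mthd, uint32_t count)
{
	*push++ = (count << 18) | mthd;  /* method header: count + address */
	return push;
}

static inline uint32_t *push_data(uint32_t *push, uint32_t data)
{
	*push++ = data;                  /* parameter word follows the header */
	return push;
}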
1331 | void | ||
1332 | nvd0_display_destroy(struct drm_device *dev) | ||
1333 | { | ||
1334 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1335 | struct nvd0_display *disp = nvd0_display(dev); | ||
1336 | struct pci_dev *pdev = dev->pdev; | ||
1337 | |||
1338 | nvd0_display_fini(dev); | ||
1339 | |||
1340 | pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle); | ||
1341 | nouveau_gpuobj_ref(NULL, &disp->mem); | ||
1342 | nouveau_irq_unregister(dev, 26); | ||
1343 | |||
1344 | dev_priv->engine.display.priv = NULL; | ||
1345 | kfree(disp); | ||
1346 | } | ||
1347 | |||
1348 | int | ||
1349 | nvd0_display_create(struct drm_device *dev) | ||
1350 | { | ||
1351 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1352 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | ||
1353 | struct dcb_table *dcb = &dev_priv->vbios.dcb; | ||
1354 | struct drm_connector *connector, *tmp; | ||
1355 | struct pci_dev *pdev = dev->pdev; | ||
1356 | struct nvd0_display *disp; | ||
1357 | struct dcb_entry *dcbe; | ||
1358 | int ret, i; | ||
1359 | |||
1360 | disp = kzalloc(sizeof(*disp), GFP_KERNEL); | ||
1361 | if (!disp) | ||
1362 | return -ENOMEM; | ||
1363 | dev_priv->engine.display.priv = disp; | ||
1364 | |||
1365 | /* create crtc objects to represent the hw heads */ | ||
1366 | for (i = 0; i < 2; i++) { | ||
1367 | ret = nvd0_crtc_create(dev, i); | ||
1368 | if (ret) | ||
1369 | goto out; | ||
1370 | } | ||
1371 | |||
1372 | /* create encoder/connector objects based on VBIOS DCB table */ | ||
1373 | for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { | ||
1374 | connector = nouveau_connector_create(dev, dcbe->connector); | ||
1375 | if (IS_ERR(connector)) | ||
1376 | continue; | ||
1377 | |||
1378 | if (dcbe->location != DCB_LOC_ON_CHIP) { | ||
1379 | NV_WARN(dev, "skipping off-chip encoder %d/%d\n", | ||
1380 | dcbe->type, ffs(dcbe->or) - 1); | ||
1381 | continue; | ||
1382 | } | ||
1383 | |||
1384 | switch (dcbe->type) { | ||
1385 | case OUTPUT_TMDS: | ||
1386 | case OUTPUT_LVDS: | ||
1387 | nvd0_sor_create(connector, dcbe); | ||
1388 | break; | ||
1389 | case OUTPUT_ANALOG: | ||
1390 | nvd0_dac_create(connector, dcbe); | ||
1391 | break; | ||
1392 | default: | ||
1393 | NV_WARN(dev, "skipping unsupported encoder %d/%d\n", | ||
1394 | dcbe->type, ffs(dcbe->or) - 1); | ||
1395 | continue; | ||
1396 | } | ||
1397 | } | ||
1398 | |||
1399 | /* cull any connectors we created that don't have an encoder */ | ||
1400 | list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { | ||
1401 | if (connector->encoder_ids[0]) | ||
1402 | continue; | ||
1403 | |||
1404 | NV_WARN(dev, "%s has no encoders, removing\n", | ||
1405 | drm_get_connector_name(connector)); | ||
1406 | connector->funcs->destroy(connector); | ||
1407 | } | ||
1408 | |||
1409 | /* setup interrupt handling */ | ||
1410 | tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); | ||
1411 | nouveau_irq_register(dev, 26, nvd0_display_intr); | ||
1412 | |||
1413 | /* hash table and dma objects for the memory areas we care about */ | ||
1414 | ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000, | ||
1415 | NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); | ||
1416 | if (ret) | ||
1417 | goto out; | ||
1418 | |||
1419 | nv_wo32(disp->mem, 0x1000, 0x00000049); | ||
1420 | nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8); | ||
1421 | nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8); | ||
1422 | nv_wo32(disp->mem, 0x100c, 0x00000000); | ||
1423 | nv_wo32(disp->mem, 0x1010, 0x00000000); | ||
1424 | nv_wo32(disp->mem, 0x1014, 0x00000000); | ||
1425 | nv_wo32(disp->mem, 0x0000, NvEvoSync); | ||
1426 | nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001); | ||
1427 | |||
1428 | nv_wo32(disp->mem, 0x1020, 0x00000049); | ||
1429 | nv_wo32(disp->mem, 0x1024, 0x00000000); | ||
1430 | nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8); | ||
1431 | nv_wo32(disp->mem, 0x102c, 0x00000000); | ||
1432 | nv_wo32(disp->mem, 0x1030, 0x00000000); | ||
1433 | nv_wo32(disp->mem, 0x1034, 0x00000000); | ||
1434 | nv_wo32(disp->mem, 0x0008, NvEvoVRAM); | ||
1435 | nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001); | ||
1436 | |||
1437 | nv_wo32(disp->mem, 0x1040, 0x00000009); | ||
1438 | nv_wo32(disp->mem, 0x1044, 0x00000000); | ||
1439 | nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8); | ||
1440 | nv_wo32(disp->mem, 0x104c, 0x00000000); | ||
1441 | nv_wo32(disp->mem, 0x1050, 0x00000000); | ||
1442 | nv_wo32(disp->mem, 0x1054, 0x00000000); | ||
1443 | nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP); | ||
1444 | nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001); | ||
1445 | |||
1446 | nv_wo32(disp->mem, 0x1060, 0x0fe00009); | ||
1447 | nv_wo32(disp->mem, 0x1064, 0x00000000); | ||
1448 | nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8); | ||
1449 | nv_wo32(disp->mem, 0x106c, 0x00000000); | ||
1450 | nv_wo32(disp->mem, 0x1070, 0x00000000); | ||
1451 | nv_wo32(disp->mem, 0x1074, 0x00000000); | ||
1452 | nv_wo32(disp->mem, 0x0018, NvEvoFB32); | ||
1453 | nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001); | ||
1454 | |||
1455 | pinstmem->flush(dev); | ||
1456 | |||
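Each nv_wo32() group above builds one DMA object (a class/flags word, base and limit shifted right by 8, then padding) plus a two-word hash entry pairing a handle with (object offset << 9) | valid. A sketch of that layout as inferred from this function alone, not from any documented format:

/* Layout sketch inferred from the writes above; not a documented format. */
static void write_dma_obj_sketch(struct nouveau_gpuobj *mem, u32 ent, u32 obj,
				 u32 handle, u32 flags, u64 base, u64 limit)
{
	nv_wo32(mem, obj + 0x00, flags);          /* class/flags, e.g. 0x49 */
	nv_wo32(mem, obj + 0x04, base >> 8);      /* start of window */
	nv_wo32(mem, obj + 0x08, limit >> 8);     /* end of window */
	nv_wo32(mem, obj + 0x0c, 0x00000000);
	nv_wo32(mem, obj + 0x10, 0x00000000);
	nv_wo32(mem, obj + 0x14, 0x00000000);
	nv_wo32(mem, ent + 0x00, handle);         /* hash entry: handle ... */
	nv_wo32(mem, ent + 0x04, (obj << 9) | 1); /* ... offset | valid bit */
}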
1457 | /* push buffers for evo channels */ | ||
1458 | disp->evo[0].ptr = | ||
1459 | pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle); | ||
1460 | if (!disp->evo[0].ptr) { | ||
1461 | ret = -ENOMEM; | ||
1462 | goto out; | ||
1463 | } | ||
1464 | |||
1465 | ret = nvd0_display_init(dev); | ||
1466 | if (ret) | ||
1467 | goto out; | ||
1468 | |||
1469 | out: | ||
1470 | if (ret) | ||
1471 | nvd0_display_destroy(dev); | ||
1472 | return ret; | ||
1473 | } | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c4ffa14fb2f4..ed406e8404a3 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | static void evergreen_gpu_init(struct radeon_device *rdev); | 40 | static void evergreen_gpu_init(struct radeon_device *rdev); |
41 | void evergreen_fini(struct radeon_device *rdev); | 41 | void evergreen_fini(struct radeon_device *rdev); |
42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | 42 | void evergreen_pcie_gen2_enable(struct radeon_device *rdev); |
43 | 43 | ||
44 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | 44 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) |
45 | { | 45 | { |
@@ -935,6 +935,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) | |||
935 | WREG32(VM_CONTEXT1_CNTL, 0); | 935 | WREG32(VM_CONTEXT1_CNTL, 0); |
936 | 936 | ||
937 | evergreen_pcie_gart_tlb_flush(rdev); | 937 | evergreen_pcie_gart_tlb_flush(rdev); |
938 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
939 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
940 | (unsigned long long)rdev->gart.table_addr); | ||
938 | rdev->gart.ready = true; | 941 | rdev->gart.ready = true; |
939 | return 0; | 942 | return 0; |
940 | } | 943 | } |
@@ -2586,7 +2589,7 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2586 | return 0; | 2589 | return 0; |
2587 | } | 2590 | } |
2588 | 2591 | ||
2589 | static inline void evergreen_irq_ack(struct radeon_device *rdev) | 2592 | static void evergreen_irq_ack(struct radeon_device *rdev) |
2590 | { | 2593 | { |
2591 | u32 tmp; | 2594 | u32 tmp; |
2592 | 2595 | ||
@@ -2697,7 +2700,7 @@ void evergreen_irq_suspend(struct radeon_device *rdev) | |||
2697 | r600_rlc_stop(rdev); | 2700 | r600_rlc_stop(rdev); |
2698 | } | 2701 | } |
2699 | 2702 | ||
2700 | static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | 2703 | static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) |
2701 | { | 2704 | { |
2702 | u32 wptr, tmp; | 2705 | u32 wptr, tmp; |
2703 | 2706 | ||
@@ -3003,8 +3006,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
3003 | int r; | 3006 | int r; |
3004 | 3007 | ||
3005 | /* enable pcie gen2 link */ | 3008 | /* enable pcie gen2 link */ |
3006 | if (!ASIC_IS_DCE5(rdev)) | 3009 | evergreen_pcie_gen2_enable(rdev); |
3007 | evergreen_pcie_gen2_enable(rdev); | ||
3008 | 3010 | ||
3009 | if (ASIC_IS_DCE5(rdev)) { | 3011 | if (ASIC_IS_DCE5(rdev)) { |
3010 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | 3012 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { |
@@ -3041,7 +3043,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
3041 | 3043 | ||
3042 | r = evergreen_blit_init(rdev); | 3044 | r = evergreen_blit_init(rdev); |
3043 | if (r) { | 3045 | if (r) { |
3044 | evergreen_blit_fini(rdev); | 3046 | r600_blit_fini(rdev); |
3045 | rdev->asic->copy = NULL; | 3047 | rdev->asic->copy = NULL; |
3046 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 3048 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
3047 | } | 3049 | } |
@@ -3107,45 +3109,14 @@ int evergreen_resume(struct radeon_device *rdev) | |||
3107 | 3109 | ||
3108 | int evergreen_suspend(struct radeon_device *rdev) | 3110 | int evergreen_suspend(struct radeon_device *rdev) |
3109 | { | 3111 | { |
3110 | int r; | ||
3111 | |||
3112 | /* FIXME: we should wait for ring to be empty */ | 3112 | /* FIXME: we should wait for ring to be empty */ |
3113 | r700_cp_stop(rdev); | 3113 | r700_cp_stop(rdev); |
3114 | rdev->cp.ready = false; | 3114 | rdev->cp.ready = false; |
3115 | evergreen_irq_suspend(rdev); | 3115 | evergreen_irq_suspend(rdev); |
3116 | radeon_wb_disable(rdev); | 3116 | radeon_wb_disable(rdev); |
3117 | evergreen_pcie_gart_disable(rdev); | 3117 | evergreen_pcie_gart_disable(rdev); |
3118 | r600_blit_suspend(rdev); | ||
3118 | 3119 | ||
3119 | /* unpin shaders bo */ | ||
3120 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
3121 | if (likely(r == 0)) { | ||
3122 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
3123 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
3124 | } | ||
3125 | |||
3126 | return 0; | ||
3127 | } | ||
3128 | |||
3129 | int evergreen_copy_blit(struct radeon_device *rdev, | ||
3130 | uint64_t src_offset, | ||
3131 | uint64_t dst_offset, | ||
3132 | unsigned num_gpu_pages, | ||
3133 | struct radeon_fence *fence) | ||
3134 | { | ||
3135 | int r; | ||
3136 | |||
3137 | mutex_lock(&rdev->r600_blit.mutex); | ||
3138 | rdev->r600_blit.vb_ib = NULL; | ||
3139 | r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); | ||
3140 | if (r) { | ||
3141 | if (rdev->r600_blit.vb_ib) | ||
3142 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
3143 | mutex_unlock(&rdev->r600_blit.mutex); | ||
3144 | return r; | ||
3145 | } | ||
3146 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); | ||
3147 | evergreen_blit_done_copy(rdev, fence); | ||
3148 | mutex_unlock(&rdev->r600_blit.mutex); | ||
3149 | return 0; | 3120 | return 0; |
3150 | } | 3121 | } |
3151 | 3122 | ||
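For reference, the unpin sequence removed here is the usual reserve/unpin/unreserve dance, which r600_blit_suspend() is now expected to perform on behalf of all r600-family ASICs; as a sketch built from the removed lines:

/* The reserve/unpin/unreserve sequence removed above, kept as a sketch
 * of what the shared r600_blit_suspend() helper is expected to do. */
static void blit_unpin_shader_sketch(struct radeon_device *rdev)
{
	int r;

	if (!rdev->r600_blit.shader_obj)
		return;

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
}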
@@ -3257,7 +3228,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
3257 | 3228 | ||
3258 | void evergreen_fini(struct radeon_device *rdev) | 3229 | void evergreen_fini(struct radeon_device *rdev) |
3259 | { | 3230 | { |
3260 | evergreen_blit_fini(rdev); | 3231 | r600_blit_fini(rdev); |
3261 | r700_cp_fini(rdev); | 3232 | r700_cp_fini(rdev); |
3262 | r600_irq_fini(rdev); | 3233 | r600_irq_fini(rdev); |
3263 | radeon_wb_fini(rdev); | 3234 | radeon_wb_fini(rdev); |
@@ -3273,7 +3244,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
3273 | rdev->bios = NULL; | 3244 | rdev->bios = NULL; |
3274 | } | 3245 | } |
3275 | 3246 | ||
3276 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) | 3247 | void evergreen_pcie_gen2_enable(struct radeon_device *rdev) |
3277 | { | 3248 | { |
3278 | u32 link_width_cntl, speed_cntl; | 3249 | u32 link_width_cntl, speed_cntl; |
3279 | 3250 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2eb251858e72..dcf11bbc06d9 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -56,7 +56,9 @@ set_render_target(struct radeon_device *rdev, int format, | |||
56 | if (h < 8) | 56 | if (h < 8) |
57 | h = 8; | 57 | h = 8; |
58 | 58 | ||
59 | cb_color_info = ((format << 2) | (1 << 24) | (1 << 8)); | 59 | cb_color_info = CB_FORMAT(format) | |
60 | CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | | ||
61 | CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
60 | pitch = (w / 8) - 1; | 62 | pitch = (w / 8) - 1; |
61 | slice = ((w * h) / 64) - 1; | 63 | slice = ((w * h) / 64) - 1; |
62 | 64 | ||
@@ -67,7 +69,7 @@ set_render_target(struct radeon_device *rdev, int format, | |||
67 | radeon_ring_write(rdev, slice); | 69 | radeon_ring_write(rdev, slice); |
68 | radeon_ring_write(rdev, 0); | 70 | radeon_ring_write(rdev, 0); |
69 | radeon_ring_write(rdev, cb_color_info); | 71 | radeon_ring_write(rdev, cb_color_info); |
70 | radeon_ring_write(rdev, (1 << 4)); | 72 | radeon_ring_write(rdev, 0); |
71 | radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16)); | 73 | radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16)); |
72 | radeon_ring_write(rdev, 0); | 74 | radeon_ring_write(rdev, 0); |
73 | radeon_ring_write(rdev, 0); | 75 | radeon_ring_write(rdev, 0); |
@@ -133,12 +135,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
133 | u32 sq_vtx_constant_word2, sq_vtx_constant_word3; | 135 | u32 sq_vtx_constant_word2, sq_vtx_constant_word3; |
134 | 136 | ||
135 | /* high addr, stride */ | 137 | /* high addr, stride */ |
136 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 138 | sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | |
139 | SQ_VTXC_STRIDE(16); | ||
137 | #ifdef __BIG_ENDIAN | 140 | #ifdef __BIG_ENDIAN |
138 | sq_vtx_constant_word2 |= (2 << 30); | 141 | sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); |
139 | #endif | 142 | #endif |
140 | /* xyzw swizzles */ | 143 | /* xyzw swizzles */ |
141 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); | 144 | sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) | |
145 | SQ_VTCX_SEL_Y(SQ_SEL_Y) | | ||
146 | SQ_VTCX_SEL_Z(SQ_SEL_Z) | | ||
147 | SQ_VTCX_SEL_W(SQ_SEL_W); | ||
142 | 148 | ||
143 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | 149 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); |
144 | radeon_ring_write(rdev, 0x580); | 150 | radeon_ring_write(rdev, 0x580); |
@@ -149,7 +155,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
149 | radeon_ring_write(rdev, 0); | 155 | radeon_ring_write(rdev, 0); |
150 | radeon_ring_write(rdev, 0); | 156 | radeon_ring_write(rdev, 0); |
151 | radeon_ring_write(rdev, 0); | 157 | radeon_ring_write(rdev, 0); |
152 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); | 158 | radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER)); |
153 | 159 | ||
154 | if ((rdev->family == CHIP_CEDAR) || | 160 | if ((rdev->family == CHIP_CEDAR) || |
155 | (rdev->family == CHIP_PALM) || | 161 | (rdev->family == CHIP_PALM) || |
@@ -176,14 +182,19 @@ set_tex_resource(struct radeon_device *rdev, | |||
176 | if (h < 1) | 182 | if (h < 1) |
177 | h = 1; | 183 | h = 1; |
178 | 184 | ||
179 | sq_tex_resource_word0 = (1 << 0); /* 2D */ | 185 | sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D); |
180 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | | 186 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | |
181 | ((w - 1) << 18)); | 187 | ((w - 1) << 18)); |
182 | sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28); | 188 | sq_tex_resource_word1 = ((h - 1) << 0) | |
189 | TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
183 | /* xyzw swizzles */ | 190 | /* xyzw swizzles */ |
184 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); | 191 | sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) | |
192 | TEX_DST_SEL_Y(SQ_SEL_Y) | | ||
193 | TEX_DST_SEL_Z(SQ_SEL_Z) | | ||
194 | TEX_DST_SEL_W(SQ_SEL_W); | ||
185 | 195 | ||
186 | sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30); | 196 | sq_tex_resource_word7 = format | |
197 | S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); | ||
187 | 198 | ||
188 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | 199 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); |
189 | radeon_ring_write(rdev, 0); | 200 | radeon_ring_write(rdev, 0); |
@@ -584,31 +595,6 @@ set_default_state(struct radeon_device *rdev) | |||
584 | 595 | ||
585 | } | 596 | } |
586 | 597 | ||
587 | static inline uint32_t i2f(uint32_t input) | ||
588 | { | ||
589 | u32 result, i, exponent, fraction; | ||
590 | |||
591 | if ((input & 0x3fff) == 0) | ||
592 | result = 0; /* 0 is a special case */ | ||
593 | else { | ||
594 | exponent = 140; /* exponent biased by 127; */ | ||
595 | fraction = (input & 0x3fff) << 10; /* cheat and only | ||
596 | handle numbers below 2^14 */ | ||
597 | for (i = 0; i < 14; i++) { | ||
598 | if (fraction & 0x800000) | ||
599 | break; | ||
600 | else { | ||
601 | fraction = fraction << 1; /* keep | ||
602 | shifting left until top bit = 1 */ | ||
603 | exponent = exponent - 1; | ||
604 | } | ||
605 | } | ||
606 | result = exponent << 23 | (fraction & 0x7fffff); /* mask | ||
607 | off top bit; assumed 1 */ | ||
608 | } | ||
609 | return result; | ||
610 | } | ||
611 | |||
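The removed i2f() hand-builds an IEEE-754 single-precision bit pattern: the 14-bit input seeds the fraction, which is shifted left until the implicit leading one sits at bit 23 while the biased exponent is decremented to match. A standalone userspace re-creation of the same trick with a quick self-check:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Userspace re-creation of the removed i2f() for illustration: build an
 * IEEE-754 single-precision bit pattern for integers below 2^14. */
static uint32_t i2f_sketch(uint32_t input)
{
	uint32_t result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		return 0;                          /* 0 is a special case */

	exponent = 140;                            /* 13 plus the bias of 127 */
	fraction = (input & 0x3fff) << 10;         /* seed; top bit at most 23 */
	for (i = 0; i < 14; i++) {
		if (fraction & 0x800000)
			break;
		fraction <<= 1;                    /* normalise: top bit to 23 */
		exponent--;
	}
	/* drop the implicit leading one, splice in the biased exponent */
	result = (exponent << 23) | (fraction & 0x7fffff);
	return result;
}

int main(void)
{
	uint32_t bits = i2f_sketch(1000);
	float f;

	memcpy(&f, &bits, sizeof(f));
	printf("1000 -> 0x%08x (%f)\n", bits, f); /* 0x447a0000 (1000.0) */
	return 0;
}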
612 | int evergreen_blit_init(struct radeon_device *rdev) | 598 | int evergreen_blit_init(struct radeon_device *rdev) |
613 | { | 599 | { |
614 | u32 obj_size; | 600 | u32 obj_size; |
@@ -617,6 +603,24 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
617 | u32 packet2s[16]; | 603 | u32 packet2s[16]; |
618 | int num_packet2s = 0; | 604 | int num_packet2s = 0; |
619 | 605 | ||
606 | rdev->r600_blit.primitives.set_render_target = set_render_target; | ||
607 | rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; | ||
608 | rdev->r600_blit.primitives.set_shaders = set_shaders; | ||
609 | rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; | ||
610 | rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; | ||
611 | rdev->r600_blit.primitives.set_scissors = set_scissors; | ||
612 | rdev->r600_blit.primitives.draw_auto = draw_auto; | ||
613 | rdev->r600_blit.primitives.set_default_state = set_default_state; | ||
614 | |||
615 | rdev->r600_blit.ring_size_common = 55; /* shaders + def state */ | ||
616 | rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ | ||
617 | rdev->r600_blit.ring_size_common += 5; /* done copy */ | ||
618 | rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ | ||
619 | |||
620 | rdev->r600_blit.ring_size_per_loop = 74; | ||
621 | |||
622 | rdev->r600_blit.max_dim = 16384; | ||
623 | |||
620 | /* pin copy shader into vram if already initialized */ | 624 | /* pin copy shader into vram if already initialized */ |
621 | if (rdev->r600_blit.shader_obj) | 625 | if (rdev->r600_blit.shader_obj) |
622 | goto done; | 626 | goto done; |
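The new ring_size_common (55 + 10 + 5 + 10 = 80 dwords) and ring_size_per_loop (74 dwords) fields turn the sizing arithmetic that the removed evergreen_blit_prepare_copy() did inline into per-ASIC data the shared r600 blit code can consume. A minimal sketch of sizing a blit from these numbers; the helper itself is hypothetical:

#include <stdint.h>

struct blit_sizes {
	int ring_size_common;    /* setup + fences: 55 + 10 + 5 + 10 */
	int ring_size_per_loop;  /* dwords emitted per copy rectangle */
};

/* Hypothetical helper: total ring dwords for num_loops rectangles. */
static int blit_ring_size(const struct blit_sizes *s, int num_loops)
{
	return s->ring_size_common + num_loops * s->ring_size_per_loop;
}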
@@ -712,277 +716,3 @@ done: | |||
712 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | 716 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
713 | return 0; | 717 | return 0; |
714 | } | 718 | } |
715 | |||
716 | void evergreen_blit_fini(struct radeon_device *rdev) | ||
717 | { | ||
718 | int r; | ||
719 | |||
720 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
721 | if (rdev->r600_blit.shader_obj == NULL) | ||
722 | return; | ||
723 | /* If we can't reserve the bo, unref should be enough to destroy | ||
724 | * it when it becomes idle. | ||
725 | */ | ||
726 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
727 | if (!r) { | ||
728 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
729 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
730 | } | ||
731 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | ||
732 | } | ||
733 | |||
734 | static int evergreen_vb_ib_get(struct radeon_device *rdev) | ||
735 | { | ||
736 | int r; | ||
737 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); | ||
738 | if (r) { | ||
739 | DRM_ERROR("failed to get IB for vertex buffer\n"); | ||
740 | return r; | ||
741 | } | ||
742 | |||
743 | rdev->r600_blit.vb_total = 64*1024; | ||
744 | rdev->r600_blit.vb_used = 0; | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | static void evergreen_vb_ib_put(struct radeon_device *rdev) | ||
749 | { | ||
750 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | ||
751 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
752 | } | ||
753 | |||
754 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | ||
755 | { | ||
756 | int r; | ||
757 | int ring_size, line_size; | ||
758 | int max_size; | ||
759 | /* loops of emits + fence emit possible */ | ||
760 | int dwords_per_loop = 74, num_loops; | ||
761 | |||
762 | r = evergreen_vb_ib_get(rdev); | ||
763 | if (r) | ||
764 | return r; | ||
765 | |||
766 | /* 8 bpp vs 32 bpp for xfer unit */ | ||
767 | if (size_bytes & 3) | ||
768 | line_size = 8192; | ||
769 | else | ||
770 | line_size = 8192 * 4; | ||
771 | |||
772 | max_size = 8192 * line_size; | ||
773 | |||
774 | /* major loops cover the max size transfer */ | ||
775 | num_loops = ((size_bytes + max_size) / max_size); | ||
776 | /* minor loops cover the extra non aligned bits */ | ||
777 | num_loops += ((size_bytes % line_size) ? 1 : 0); | ||
778 | /* calculate number of loops correctly */ | ||
779 | ring_size = num_loops * dwords_per_loop; | ||
780 | /* set default + shaders */ | ||
781 | ring_size += 55; /* shaders + def state */ | ||
782 | ring_size += 10; /* fence emit for VB IB */ | ||
783 | ring_size += 5; /* done copy */ | ||
784 | ring_size += 10; /* fence emit for done copy */ | ||
785 | r = radeon_ring_lock(rdev, ring_size); | ||
786 | if (r) | ||
787 | return r; | ||
788 | |||
789 | set_default_state(rdev); /* 36 */ | ||
790 | set_shaders(rdev); /* 16 */ | ||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | ||
795 | { | ||
796 | int r; | ||
797 | |||
798 | if (rdev->r600_blit.vb_ib) | ||
799 | evergreen_vb_ib_put(rdev); | ||
800 | |||
801 | if (fence) | ||
802 | r = radeon_fence_emit(rdev, fence); | ||
803 | |||
804 | radeon_ring_unlock_commit(rdev); | ||
805 | } | ||
806 | |||
807 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
808 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
809 | int size_bytes) | ||
810 | { | ||
811 | int max_bytes; | ||
812 | u64 vb_gpu_addr; | ||
813 | u32 *vb; | ||
814 | |||
815 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, | ||
816 | size_bytes, rdev->r600_blit.vb_used); | ||
817 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
818 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | ||
819 | max_bytes = 8192; | ||
820 | |||
821 | while (size_bytes) { | ||
822 | int cur_size = size_bytes; | ||
823 | int src_x = src_gpu_addr & 255; | ||
824 | int dst_x = dst_gpu_addr & 255; | ||
825 | int h = 1; | ||
826 | src_gpu_addr = src_gpu_addr & ~255ULL; | ||
827 | dst_gpu_addr = dst_gpu_addr & ~255ULL; | ||
828 | |||
829 | if (!src_x && !dst_x) { | ||
830 | h = (cur_size / max_bytes); | ||
831 | if (h > 8192) | ||
832 | h = 8192; | ||
833 | if (h == 0) | ||
834 | h = 1; | ||
835 | else | ||
836 | cur_size = max_bytes; | ||
837 | } else { | ||
838 | if (cur_size > max_bytes) | ||
839 | cur_size = max_bytes; | ||
840 | if (cur_size > (max_bytes - dst_x)) | ||
841 | cur_size = (max_bytes - dst_x); | ||
842 | if (cur_size > (max_bytes - src_x)) | ||
843 | cur_size = (max_bytes - src_x); | ||
844 | } | ||
845 | |||
846 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
847 | WARN_ON(1); | ||
848 | } | ||
849 | |||
850 | vb[0] = i2f(dst_x); | ||
851 | vb[1] = 0; | ||
852 | vb[2] = i2f(src_x); | ||
853 | vb[3] = 0; | ||
854 | |||
855 | vb[4] = i2f(dst_x); | ||
856 | vb[5] = i2f(h); | ||
857 | vb[6] = i2f(src_x); | ||
858 | vb[7] = i2f(h); | ||
859 | |||
860 | vb[8] = i2f(dst_x + cur_size); | ||
861 | vb[9] = i2f(h); | ||
862 | vb[10] = i2f(src_x + cur_size); | ||
863 | vb[11] = i2f(h); | ||
864 | |||
865 | /* src 10 */ | ||
866 | set_tex_resource(rdev, FMT_8, | ||
867 | src_x + cur_size, h, src_x + cur_size, | ||
868 | src_gpu_addr); | ||
869 | |||
870 | /* 5 */ | ||
871 | cp_set_surface_sync(rdev, | ||
872 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
873 | |||
874 | |||
875 | /* dst 17 */ | ||
876 | set_render_target(rdev, COLOR_8, | ||
877 | dst_x + cur_size, h, | ||
878 | dst_gpu_addr); | ||
879 | |||
880 | /* scissors 12 */ | ||
881 | set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); | ||
882 | |||
883 | /* 15 */ | ||
884 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | ||
885 | set_vtx_resource(rdev, vb_gpu_addr); | ||
886 | |||
887 | /* draw 10 */ | ||
888 | draw_auto(rdev); | ||
889 | |||
890 | /* 5 */ | ||
891 | cp_set_surface_sync(rdev, | ||
892 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
893 | cur_size * h, dst_gpu_addr); | ||
894 | |||
895 | vb += 12; | ||
896 | rdev->r600_blit.vb_used += 12 * 4; | ||
897 | |||
898 | src_gpu_addr += cur_size * h; | ||
899 | dst_gpu_addr += cur_size * h; | ||
900 | size_bytes -= cur_size * h; | ||
901 | } | ||
902 | } else { | ||
903 | max_bytes = 8192 * 4; | ||
904 | |||
905 | while (size_bytes) { | ||
906 | int cur_size = size_bytes; | ||
907 | int src_x = (src_gpu_addr & 255); | ||
908 | int dst_x = (dst_gpu_addr & 255); | ||
909 | int h = 1; | ||
910 | src_gpu_addr = src_gpu_addr & ~255ULL; | ||
911 | dst_gpu_addr = dst_gpu_addr & ~255ULL; | ||
912 | |||
913 | if (!src_x && !dst_x) { | ||
914 | h = (cur_size / max_bytes); | ||
915 | if (h > 8192) | ||
916 | h = 8192; | ||
917 | if (h == 0) | ||
918 | h = 1; | ||
919 | else | ||
920 | cur_size = max_bytes; | ||
921 | } else { | ||
922 | if (cur_size > max_bytes) | ||
923 | cur_size = max_bytes; | ||
924 | if (cur_size > (max_bytes - dst_x)) | ||
925 | cur_size = (max_bytes - dst_x); | ||
926 | if (cur_size > (max_bytes - src_x)) | ||
927 | cur_size = (max_bytes - src_x); | ||
928 | } | ||
929 | |||
930 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
931 | WARN_ON(1); | ||
932 | } | ||
933 | |||
934 | vb[0] = i2f(dst_x / 4); | ||
935 | vb[1] = 0; | ||
936 | vb[2] = i2f(src_x / 4); | ||
937 | vb[3] = 0; | ||
938 | |||
939 | vb[4] = i2f(dst_x / 4); | ||
940 | vb[5] = i2f(h); | ||
941 | vb[6] = i2f(src_x / 4); | ||
942 | vb[7] = i2f(h); | ||
943 | |||
944 | vb[8] = i2f((dst_x + cur_size) / 4); | ||
945 | vb[9] = i2f(h); | ||
946 | vb[10] = i2f((src_x + cur_size) / 4); | ||
947 | vb[11] = i2f(h); | ||
948 | |||
949 | /* src 10 */ | ||
950 | set_tex_resource(rdev, FMT_8_8_8_8, | ||
951 | (src_x + cur_size) / 4, | ||
952 | h, (src_x + cur_size) / 4, | ||
953 | src_gpu_addr); | ||
954 | /* 5 */ | ||
955 | cp_set_surface_sync(rdev, | ||
956 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
957 | |||
958 | /* dst 17 */ | ||
959 | set_render_target(rdev, COLOR_8_8_8_8, | ||
960 | (dst_x + cur_size) / 4, h, | ||
961 | dst_gpu_addr); | ||
962 | |||
963 | /* scissors 12 */ | ||
964 | set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h); | ||
965 | |||
966 | /* Vertex buffer setup 15 */ | ||
967 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | ||
968 | set_vtx_resource(rdev, vb_gpu_addr); | ||
969 | |||
970 | /* draw 10 */ | ||
971 | draw_auto(rdev); | ||
972 | |||
973 | /* 5 */ | ||
974 | cp_set_surface_sync(rdev, | ||
975 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
976 | cur_size * h, dst_gpu_addr); | ||
977 | |||
978 | /* 74 ring dwords per loop */ | ||
979 | vb += 12; | ||
980 | rdev->r600_blit.vb_used += 12 * 4; | ||
981 | |||
982 | src_gpu_addr += cur_size * h; | ||
983 | dst_gpu_addr += cur_size * h; | ||
984 | size_bytes -= cur_size * h; | ||
985 | } | ||
986 | } | ||
987 | } | ||
988 | |||
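For context, the removed copy loop carves a linear transfer into 2D rectangles: each GPU address is aligned down to 256 bytes, the remainder becomes an x offset, and every iteration emits either a stack of full-width rows or a single row clamped by the offsets. A standalone sketch of that chunking arithmetic, lifted from the removed byte path:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the chunking arithmetic from the removed
 * evergreen_kms_blit_copy(): align each address down to 256 bytes,
 * keep the remainder as an x offset, and emit either full-width rows
 * or one clamped row per iteration. */
static void plan_copy(uint64_t src, uint64_t dst, int size_bytes, int max_bytes)
{
	while (size_bytes) {
		int cur_size = size_bytes;
		int src_x = src & 255;
		int dst_x = dst & 255;
		int h = 1;

		src &= ~255ULL;
		dst &= ~255ULL;

		if (!src_x && !dst_x) {
			h = cur_size / max_bytes;   /* full-width rows */
			if (h > 8192)
				h = 8192;
			if (h == 0)
				h = 1;
			else
				cur_size = max_bytes;
		} else {                            /* clamp to the offsets */
			if (cur_size > max_bytes)
				cur_size = max_bytes;
			if (cur_size > max_bytes - dst_x)
				cur_size = max_bytes - dst_x;
			if (cur_size > max_bytes - src_x)
				cur_size = max_bytes - src_x;
		}

		printf("rect %dx%d at src_x=%d dst_x=%d\n",
		       cur_size, h, src_x, dst_x);

		src += (uint64_t)cur_size * h;
		dst += (uint64_t)cur_size * h;
		size_bytes -= cur_size * h;
	}
}

int main(void)
{
	plan_copy(0x1011, 0x8000, 100000, 8192);  /* unaligned (byte) case */
	return 0;
}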
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index a134790903d3..7fdfa8ea7570 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -122,12 +122,6 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) | |||
122 | track->db_s_write_bo = NULL; | 122 | track->db_s_write_bo = NULL; |
123 | } | 123 | } |
124 | 124 | ||
125 | static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | ||
126 | { | ||
127 | /* XXX fill in */ | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int evergreen_cs_track_check(struct radeon_cs_parser *p) | 125 | static int evergreen_cs_track_check(struct radeon_cs_parser *p) |
132 | { | 126 | { |
133 | struct evergreen_cs_track *track = p->track; | 127 | struct evergreen_cs_track *track = p->track; |
@@ -236,28 +230,6 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | |||
236 | } | 230 | } |
237 | 231 | ||
238 | /** | 232 | /** |
239 | * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc | ||
240 | * @parser: parser structure holding parsing context. | ||
241 | * | ||
242 | * Check whether the next packet is a relocation packet3; do bo validation | ||
243 | * and compute the GPU offset using the provided start. | ||
244 | **/ | ||
245 | static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) | ||
246 | { | ||
247 | struct radeon_cs_packet p3reloc; | ||
248 | int r; | ||
249 | |||
250 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); | ||
251 | if (r) { | ||
252 | return 0; | ||
253 | } | ||
254 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
255 | return 0; | ||
256 | } | ||
257 | return 1; | ||
258 | } | ||
259 | |||
260 | /** | ||
261 | * evergreen_cs_packet_next_vline() - parse userspace VLINE packet | 233 | * evergreen_cs_packet_next_vline() - parse userspace VLINE packet |
262 | * @parser: parser structure holding parsing context. | 234 | * @parser: parser structure holding parsing context. |
263 | * | 235 | * |
@@ -414,7 +386,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, | |||
414 | * if register is safe. If the register is not flagged as safe, this function | 386 | * if register is safe. If the register is not flagged as safe, this function |
415 | * will test it against a list of registers needing special handling. | 387 | * will test it against a list of registers needing special handling. |
416 | */ | 388 | */ |
417 | static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | 389 | static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
418 | { | 390 | { |
419 | struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; | 391 | struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; |
420 | struct radeon_cs_reloc *reloc; | 392 | struct radeon_cs_reloc *reloc; |
@@ -990,7 +962,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
990 | * This function will check that the resource has valid field and that | 962 | * This function will check that the resource has valid field and that |
991 | * the texture and mipmap bo object are big enough to cover this resource. | 963 | * the texture and mipmap bo object are big enough to cover this resource. |
992 | */ | 964 | */ |
993 | static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | 965 | static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, |
994 | struct radeon_bo *texture, | 966 | struct radeon_bo *texture, |
995 | struct radeon_bo *mipmap) | 967 | struct radeon_bo *mipmap) |
996 | { | 968 | { |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 7363d9dec909..b937c49054d9 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -941,11 +941,15 @@ | |||
941 | #define CB_COLOR0_SLICE 0x28c68 | 941 | #define CB_COLOR0_SLICE 0x28c68 |
942 | #define CB_COLOR0_VIEW 0x28c6c | 942 | #define CB_COLOR0_VIEW 0x28c6c |
943 | #define CB_COLOR0_INFO 0x28c70 | 943 | #define CB_COLOR0_INFO 0x28c70 |
944 | # define CB_FORMAT(x) ((x) << 2) | ||
944 | # define CB_ARRAY_MODE(x) ((x) << 8) | 945 | # define CB_ARRAY_MODE(x) ((x) << 8) |
945 | # define ARRAY_LINEAR_GENERAL 0 | 946 | # define ARRAY_LINEAR_GENERAL 0 |
946 | # define ARRAY_LINEAR_ALIGNED 1 | 947 | # define ARRAY_LINEAR_ALIGNED 1 |
947 | # define ARRAY_1D_TILED_THIN1 2 | 948 | # define ARRAY_1D_TILED_THIN1 2 |
948 | # define ARRAY_2D_TILED_THIN1 4 | 949 | # define ARRAY_2D_TILED_THIN1 4 |
950 | # define CB_SOURCE_FORMAT(x) ((x) << 24) | ||
951 | # define CB_SF_EXPORT_FULL 0 | ||
952 | # define CB_SF_EXPORT_NORM 1 | ||
949 | #define CB_COLOR0_ATTRIB 0x28c74 | 953 | #define CB_COLOR0_ATTRIB 0x28c74 |
950 | #define CB_COLOR0_DIM 0x28c78 | 954 | #define CB_COLOR0_DIM 0x28c78 |
951 | /* only CB0-7 blocks have these regs */ | 955 | /* only CB0-7 blocks have these regs */ |
@@ -1107,15 +1111,53 @@ | |||
1107 | #define CB_COLOR7_CLEAR_WORD3 0x28e3c | 1111 | #define CB_COLOR7_CLEAR_WORD3 0x28e3c |
1108 | 1112 | ||
1109 | #define SQ_TEX_RESOURCE_WORD0_0 0x30000 | 1113 | #define SQ_TEX_RESOURCE_WORD0_0 0x30000 |
1114 | # define TEX_DIM(x) ((x) << 0) | ||
1115 | # define SQ_TEX_DIM_1D 0 | ||
1116 | # define SQ_TEX_DIM_2D 1 | ||
1117 | # define SQ_TEX_DIM_3D 2 | ||
1118 | # define SQ_TEX_DIM_CUBEMAP 3 | ||
1119 | # define SQ_TEX_DIM_1D_ARRAY 4 | ||
1120 | # define SQ_TEX_DIM_2D_ARRAY 5 | ||
1121 | # define SQ_TEX_DIM_2D_MSAA 6 | ||
1122 | # define SQ_TEX_DIM_2D_ARRAY_MSAA 7 | ||
1110 | #define SQ_TEX_RESOURCE_WORD1_0 0x30004 | 1123 | #define SQ_TEX_RESOURCE_WORD1_0 0x30004 |
1111 | # define TEX_ARRAY_MODE(x) ((x) << 28) | 1124 | # define TEX_ARRAY_MODE(x) ((x) << 28) |
1112 | #define SQ_TEX_RESOURCE_WORD2_0 0x30008 | 1125 | #define SQ_TEX_RESOURCE_WORD2_0 0x30008 |
1113 | #define SQ_TEX_RESOURCE_WORD3_0 0x3000C | 1126 | #define SQ_TEX_RESOURCE_WORD3_0 0x3000C |
1114 | #define SQ_TEX_RESOURCE_WORD4_0 0x30010 | 1127 | #define SQ_TEX_RESOURCE_WORD4_0 0x30010 |
1128 | # define TEX_DST_SEL_X(x) ((x) << 16) | ||
1129 | # define TEX_DST_SEL_Y(x) ((x) << 19) | ||
1130 | # define TEX_DST_SEL_Z(x) ((x) << 22) | ||
1131 | # define TEX_DST_SEL_W(x) ((x) << 25) | ||
1132 | # define SQ_SEL_X 0 | ||
1133 | # define SQ_SEL_Y 1 | ||
1134 | # define SQ_SEL_Z 2 | ||
1135 | # define SQ_SEL_W 3 | ||
1136 | # define SQ_SEL_0 4 | ||
1137 | # define SQ_SEL_1 5 | ||
1115 | #define SQ_TEX_RESOURCE_WORD5_0 0x30014 | 1138 | #define SQ_TEX_RESOURCE_WORD5_0 0x30014 |
1116 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 | 1139 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 |
1117 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c | 1140 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c |
1118 | 1141 | ||
1142 | #define SQ_VTX_CONSTANT_WORD0_0 0x30000 | ||
1143 | #define SQ_VTX_CONSTANT_WORD1_0 0x30004 | ||
1144 | #define SQ_VTX_CONSTANT_WORD2_0 0x30008 | ||
1145 | # define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0) | ||
1146 | # define SQ_VTXC_STRIDE(x) ((x) << 8) | ||
1147 | # define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30) | ||
1148 | # define SQ_ENDIAN_NONE 0 | ||
1149 | # define SQ_ENDIAN_8IN16 1 | ||
1150 | # define SQ_ENDIAN_8IN32 2 | ||
1151 | #define SQ_VTX_CONSTANT_WORD3_0 0x3000C | ||
1152 | # define SQ_VTCX_SEL_X(x) ((x) << 3) | ||
1153 | # define SQ_VTCX_SEL_Y(x) ((x) << 6) | ||
1154 | # define SQ_VTCX_SEL_Z(x) ((x) << 9) | ||
1155 | # define SQ_VTCX_SEL_W(x) ((x) << 12) | ||
1156 | #define SQ_VTX_CONSTANT_WORD4_0 0x30010 | ||
1157 | #define SQ_VTX_CONSTANT_WORD5_0 0x30014 | ||
1158 | #define SQ_VTX_CONSTANT_WORD6_0 0x30018 | ||
1159 | #define SQ_VTX_CONSTANT_WORD7_0 0x3001c | ||
1160 | |||
1119 | /* cayman 3D regs */ | 1161 | /* cayman 3D regs */ |
1120 | #define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0 | 1162 | #define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0 |
1121 | #define CAYMAN_DB_EQAA 0x28804 | 1163 | #define CAYMAN_DB_EQAA 0x28804 |
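These field macros are what let the blit code earlier in this series replace magic shift expressions with named fields. A quick standalone check that the macro form encodes the same word as the open-coded expression it replaces:

#include <assert.h>
#include <stdint.h>

/* Standalone check that the named-field macros encode the same word as
 * the magic-shift expression they replace in evergreen_blit_kms.c. */
#define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
#define SQ_VTXC_STRIDE(x)       ((x) << 8)
#define SQ_VTXC_ENDIAN_SWAP(x)  ((x) << 30)
#define SQ_ENDIAN_8IN32         2

int main(void)
{
	uint32_t addr_hi = 0xab;
	uint32_t new_word = SQ_VTXC_BASE_ADDR_HI(addr_hi) | SQ_VTXC_STRIDE(16);
	uint32_t old_word = (addr_hi & 0xff) | (16 << 8);

	assert(new_word == old_word);  /* same encoding, clearer intent */
	assert(SQ_VTXC_ENDIAN_SWAP((uint32_t)SQ_ENDIAN_8IN32) == 0x80000000u);
	return 0;
}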
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 8c79ca97753d..556b7bc3418b 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -40,6 +40,7 @@ extern void evergreen_mc_program(struct radeon_device *rdev); | |||
40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); | 40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); |
41 | extern int evergreen_mc_init(struct radeon_device *rdev); | 41 | extern int evergreen_mc_init(struct radeon_device *rdev); |
42 | extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); | 42 | extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); |
43 | extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | ||
43 | 44 | ||
44 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | 45 | #define EVERGREEN_PFP_UCODE_SIZE 1120 |
45 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | 46 | #define EVERGREEN_PM4_UCODE_SIZE 1376 |
@@ -967,6 +968,9 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) | |||
967 | WREG32(VM_CONTEXT1_CNTL, 0); | 968 | WREG32(VM_CONTEXT1_CNTL, 0); |
968 | 969 | ||
969 | cayman_pcie_gart_tlb_flush(rdev); | 970 | cayman_pcie_gart_tlb_flush(rdev); |
971 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
972 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
973 | (unsigned long long)rdev->gart.table_addr); | ||
970 | rdev->gart.ready = true; | 974 | rdev->gart.ready = true; |
971 | return 0; | 975 | return 0; |
972 | } | 976 | } |
@@ -1341,6 +1345,9 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1341 | { | 1345 | { |
1342 | int r; | 1346 | int r; |
1343 | 1347 | ||
1348 | /* enable pcie gen2 link */ | ||
1349 | evergreen_pcie_gen2_enable(rdev); | ||
1350 | |||
1344 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | 1351 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { |
1345 | r = ni_init_microcode(rdev); | 1352 | r = ni_init_microcode(rdev); |
1346 | if (r) { | 1353 | if (r) { |
@@ -1362,7 +1369,7 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1362 | 1369 | ||
1363 | r = evergreen_blit_init(rdev); | 1370 | r = evergreen_blit_init(rdev); |
1364 | if (r) { | 1371 | if (r) { |
1365 | evergreen_blit_fini(rdev); | 1372 | r600_blit_fini(rdev); |
1366 | rdev->asic->copy = NULL; | 1373 | rdev->asic->copy = NULL; |
1367 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 1374 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
1368 | } | 1375 | } |
@@ -1423,21 +1430,13 @@ int cayman_resume(struct radeon_device *rdev) | |||
1423 | 1430 | ||
1424 | int cayman_suspend(struct radeon_device *rdev) | 1431 | int cayman_suspend(struct radeon_device *rdev) |
1425 | { | 1432 | { |
1426 | int r; | ||
1427 | |||
1428 | /* FIXME: we should wait for ring to be empty */ | 1433 | /* FIXME: we should wait for ring to be empty */ |
1429 | cayman_cp_enable(rdev, false); | 1434 | cayman_cp_enable(rdev, false); |
1430 | rdev->cp.ready = false; | 1435 | rdev->cp.ready = false; |
1431 | evergreen_irq_suspend(rdev); | 1436 | evergreen_irq_suspend(rdev); |
1432 | radeon_wb_disable(rdev); | 1437 | radeon_wb_disable(rdev); |
1433 | cayman_pcie_gart_disable(rdev); | 1438 | cayman_pcie_gart_disable(rdev); |
1434 | 1439 | r600_blit_suspend(rdev); | |
1435 | /* unpin shaders bo */ | ||
1436 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
1437 | if (likely(r == 0)) { | ||
1438 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
1439 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1440 | } | ||
1441 | 1440 | ||
1442 | return 0; | 1441 | return 0; |
1443 | } | 1442 | } |
@@ -1550,7 +1549,7 @@ int cayman_init(struct radeon_device *rdev) | |||
1550 | 1549 | ||
1551 | void cayman_fini(struct radeon_device *rdev) | 1550 | void cayman_fini(struct radeon_device *rdev) |
1552 | { | 1551 | { |
1553 | evergreen_blit_fini(rdev); | 1552 | r600_blit_fini(rdev); |
1554 | cayman_cp_fini(rdev); | 1553 | cayman_cp_fini(rdev); |
1555 | r600_irq_fini(rdev); | 1554 | r600_irq_fini(rdev); |
1556 | radeon_wb_fini(rdev); | 1555 | radeon_wb_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 7fcdbbbf2979..8f8b8fa14357 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -68,6 +68,108 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
69 | */ | 69 | */ |
70 | 70 | ||
71 | int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | ||
72 | struct radeon_cs_packet *pkt, | ||
73 | unsigned idx, | ||
74 | unsigned reg) | ||
75 | { | ||
76 | int r; | ||
77 | u32 tile_flags = 0; | ||
78 | u32 tmp; | ||
79 | struct radeon_cs_reloc *reloc; | ||
80 | u32 value; | ||
81 | |||
82 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
83 | if (r) { | ||
84 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
85 | idx, reg); | ||
86 | r100_cs_dump_packet(p, pkt); | ||
87 | return r; | ||
88 | } | ||
89 | value = radeon_get_ib_value(p, idx); | ||
90 | tmp = value & 0x003fffff; | ||
91 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | ||
92 | |||
93 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
94 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
95 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
96 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
97 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
98 | r100_cs_dump_packet(p, pkt); | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
102 | } | ||
103 | |||
104 | tmp |= tile_flags; | ||
105 | p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | ||
110 | struct radeon_cs_packet *pkt, | ||
111 | int idx) | ||
112 | { | ||
113 | unsigned c, i; | ||
114 | struct radeon_cs_reloc *reloc; | ||
115 | struct r100_cs_track *track; | ||
116 | int r = 0; | ||
117 | volatile uint32_t *ib; | ||
118 | u32 idx_value; | ||
119 | |||
120 | ib = p->ib->ptr; | ||
121 | track = (struct r100_cs_track *)p->track; | ||
122 | c = radeon_get_ib_value(p, idx++) & 0x1F; | ||
123 | if (c > 16) { | ||
124 | DRM_ERROR("Only 16 vertex buffers are allowed %d\n", | ||
125 | pkt->opcode); | ||
126 | r100_cs_dump_packet(p, pkt); | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | track->num_arrays = c; | ||
130 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | ||
131 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
132 | if (r) { | ||
133 | DRM_ERROR("No reloc for packet3 %d\n", | ||
134 | pkt->opcode); | ||
135 | r100_cs_dump_packet(p, pkt); | ||
136 | return r; | ||
137 | } | ||
138 | idx_value = radeon_get_ib_value(p, idx); | ||
139 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
140 | |||
141 | track->arrays[i + 0].esize = idx_value >> 8; | ||
142 | track->arrays[i + 0].robj = reloc->robj; | ||
143 | track->arrays[i + 0].esize &= 0x7F; | ||
144 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
145 | if (r) { | ||
146 | DRM_ERROR("No reloc for packet3 %d\n", | ||
147 | pkt->opcode); | ||
148 | r100_cs_dump_packet(p, pkt); | ||
149 | return r; | ||
150 | } | ||
151 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); | ||
152 | track->arrays[i + 1].robj = reloc->robj; | ||
153 | track->arrays[i + 1].esize = idx_value >> 24; | ||
154 | track->arrays[i + 1].esize &= 0x7F; | ||
155 | } | ||
156 | if (c & 1) { | ||
157 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
158 | if (r) { | ||
159 | DRM_ERROR("No reloc for packet3 %d\n", | ||
160 | pkt->opcode); | ||
161 | r100_cs_dump_packet(p, pkt); | ||
162 | return r; | ||
163 | } | ||
164 | idx_value = radeon_get_ib_value(p, idx); | ||
165 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
166 | track->arrays[i + 0].robj = reloc->robj; | ||
167 | track->arrays[i + 0].esize = idx_value >> 8; | ||
168 | track->arrays[i + 0].esize &= 0x7F; | ||
169 | } | ||
170 | return r; | ||
171 | } | ||
172 | |||
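As the parser above walks it, LOAD_VBPNTR packs vertex arrays two per group: a shared sizes dword plus one relocated offset dword per array, so a pair costs three body dwords and a trailing odd array two, on top of the leading count dword. A hypothetical length helper capturing that:

/* Hypothetical helper: body length (in dwords) of a LOAD_VBPNTR packet
 * for num_arrays vertex arrays, matching the walk in the parser above. */
static int load_vbpntr_len(int num_arrays)
{
	return 1 + (num_arrays / 2) * 3 + (num_arrays & 1) * 2;
}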
71 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) | 173 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) |
72 | { | 174 | { |
73 | /* enable the pflip int */ | 175 | /* enable the pflip int */ |
@@ -513,6 +615,9 @@ int r100_pci_gart_enable(struct radeon_device *rdev) | |||
513 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; | 615 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; |
514 | WREG32(RADEON_AIC_CNTL, tmp); | 616 | WREG32(RADEON_AIC_CNTL, tmp); |
515 | r100_pci_gart_tlb_flush(rdev); | 617 | r100_pci_gart_tlb_flush(rdev); |
618 | DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n", | ||
619 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
620 | (unsigned long long)rdev->gart.table_addr); | ||
516 | rdev->gart.ready = true; | 621 | rdev->gart.ready = true; |
517 | return 0; | 622 | return 0; |
518 | } | 623 | } |
@@ -588,7 +693,7 @@ void r100_irq_disable(struct radeon_device *rdev) | |||
588 | WREG32(R_000044_GEN_INT_STATUS, tmp); | 693 | WREG32(R_000044_GEN_INT_STATUS, tmp); |
589 | } | 694 | } |
590 | 695 | ||
591 | static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | 696 | static uint32_t r100_irq_ack(struct radeon_device *rdev) |
592 | { | 697 | { |
593 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | 698 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); |
594 | uint32_t irq_mask = RADEON_SW_INT_TEST | | 699 | uint32_t irq_mask = RADEON_SW_INT_TEST | |
@@ -3147,7 +3252,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
3147 | } | 3252 | } |
3148 | } | 3253 | } |
3149 | 3254 | ||
3150 | static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) | 3255 | static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) |
3151 | { | 3256 | { |
3152 | DRM_ERROR("pitch %d\n", t->pitch); | 3257 | DRM_ERROR("pitch %d\n", t->pitch); |
3153 | DRM_ERROR("use_pitch %d\n", t->use_pitch); | 3258 | DRM_ERROR("use_pitch %d\n", t->use_pitch); |
@@ -3965,3 +4070,43 @@ int r100_init(struct radeon_device *rdev) | |||
3965 | } | 4070 | } |
3966 | return 0; | 4071 | return 0; |
3967 | } | 4072 | } |
4073 | |||
4074 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | ||
4075 | { | ||
4076 | if (reg < rdev->rmmio_size) | ||
4077 | return readl(((void __iomem *)rdev->rmmio) + reg); | ||
4078 | else { | ||
4079 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | ||
4080 | return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | ||
4081 | } | ||
4082 | } | ||
4083 | |||
4084 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
4085 | { | ||
4086 | if (reg < rdev->rmmio_size) | ||
4087 | writel(v, ((void __iomem *)rdev->rmmio) + reg); | ||
4088 | else { | ||
4089 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | ||
4090 | writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | ||
4091 | } | ||
4092 | } | ||
4093 | |||
4094 | u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) | ||
4095 | { | ||
4096 | if (reg < rdev->rio_mem_size) | ||
4097 | return ioread32(rdev->rio_mem + reg); | ||
4098 | else { | ||
4099 | iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); | ||
4100 | return ioread32(rdev->rio_mem + RADEON_MM_DATA); | ||
4101 | } | ||
4102 | } | ||
4103 | |||
4104 | void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) | ||
4105 | { | ||
4106 | if (reg < rdev->rio_mem_size) | ||
4107 | iowrite32(v, rdev->rio_mem + reg); | ||
4108 | else { | ||
4109 | iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); | ||
4110 | iowrite32(v, rdev->rio_mem + RADEON_MM_DATA); | ||
4111 | } | ||
4112 | } | ||
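The new out-of-line r100_mm_* and r100_io_* accessors implement the classic INDEX/DATA indirection: registers inside the mapped window are touched directly, and anything beyond it is reached by writing the register offset to an index port and then accessing a data port. A generic standalone sketch; the port offsets and window size below are illustrative, not the radeon definitions:

#include <stdint.h>

/* Generic sketch of INDEX/DATA register indirection; REG_INDEX,
 * REG_DATA and the window size are illustrative values only. */
#define REG_INDEX 0x0000
#define REG_DATA  0x0004

static inline uint32_t mmio_read32(volatile void *base, uint32_t off)
{
	return *(volatile uint32_t *)((volatile char *)base + off);
}

static inline void mmio_write32(volatile void *base, uint32_t off, uint32_t v)
{
	*(volatile uint32_t *)((volatile char *)base + off) = v;
}

static uint32_t reg_read(volatile void *base, uint32_t window, uint32_t reg)
{
	if (reg < window)
		return mmio_read32(base, reg);
	mmio_write32(base, REG_INDEX, reg);  /* select out-of-window register */
	return mmio_read32(base, REG_DATA);  /* then read it via the data port */
}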
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 686f9dc5d4bd..6a603b378adb 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -92,106 +92,10 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
92 | struct radeon_cs_packet *pkt, | 92 | struct radeon_cs_packet *pkt, |
93 | unsigned idx, unsigned reg); | 93 | unsigned idx, unsigned reg); |
94 | 94 | ||
95 | 95 | int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |
96 | 96 | struct radeon_cs_packet *pkt, | |
97 | static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | 97 | unsigned idx, |
98 | struct radeon_cs_packet *pkt, | 98 | unsigned reg); |
99 | unsigned idx, | 99 | int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, |
100 | unsigned reg) | 100 | struct radeon_cs_packet *pkt, |
101 | { | 101 | int idx); |
102 | int r; | ||
103 | u32 tile_flags = 0; | ||
104 | u32 tmp; | ||
105 | struct radeon_cs_reloc *reloc; | ||
106 | u32 value; | ||
107 | |||
108 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
109 | if (r) { | ||
110 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
111 | idx, reg); | ||
112 | r100_cs_dump_packet(p, pkt); | ||
113 | return r; | ||
114 | } | ||
115 | value = radeon_get_ib_value(p, idx); | ||
116 | tmp = value & 0x003fffff; | ||
117 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | ||
118 | |||
119 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
120 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
121 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
122 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
123 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
124 | r100_cs_dump_packet(p, pkt); | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
128 | } | ||
129 | |||
130 | tmp |= tile_flags; | ||
131 | p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | ||
136 | struct radeon_cs_packet *pkt, | ||
137 | int idx) | ||
138 | { | ||
139 | unsigned c, i; | ||
140 | struct radeon_cs_reloc *reloc; | ||
141 | struct r100_cs_track *track; | ||
142 | int r = 0; | ||
143 | volatile uint32_t *ib; | ||
144 | u32 idx_value; | ||
145 | |||
146 | ib = p->ib->ptr; | ||
147 | track = (struct r100_cs_track *)p->track; | ||
148 | c = radeon_get_ib_value(p, idx++) & 0x1F; | ||
149 | if (c > 16) { | ||
150 | DRM_ERROR("Only 16 vertex buffers are allowed %d\n", | ||
151 | pkt->opcode); | ||
152 | r100_cs_dump_packet(p, pkt); | ||
153 | return -EINVAL; | ||
154 | } | ||
155 | track->num_arrays = c; | ||
156 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | ||
157 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
158 | if (r) { | ||
159 | DRM_ERROR("No reloc for packet3 %d\n", | ||
160 | pkt->opcode); | ||
161 | r100_cs_dump_packet(p, pkt); | ||
162 | return r; | ||
163 | } | ||
164 | idx_value = radeon_get_ib_value(p, idx); | ||
165 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
166 | |||
167 | track->arrays[i + 0].esize = idx_value >> 8; | ||
168 | track->arrays[i + 0].robj = reloc->robj; | ||
169 | track->arrays[i + 0].esize &= 0x7F; | ||
170 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
171 | if (r) { | ||
172 | DRM_ERROR("No reloc for packet3 %d\n", | ||
173 | pkt->opcode); | ||
174 | r100_cs_dump_packet(p, pkt); | ||
175 | return r; | ||
176 | } | ||
177 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); | ||
178 | track->arrays[i + 1].robj = reloc->robj; | ||
179 | track->arrays[i + 1].esize = idx_value >> 24; | ||
180 | track->arrays[i + 1].esize &= 0x7F; | ||
181 | } | ||
182 | if (c & 1) { | ||
183 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
184 | if (r) { | ||
185 | DRM_ERROR("No reloc for packet3 %d\n", | ||
186 | pkt->opcode); | ||
187 | r100_cs_dump_packet(p, pkt); | ||
188 | return r; | ||
189 | } | ||
190 | idx_value = radeon_get_ib_value(p, idx); | ||
191 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
192 | track->arrays[i + 0].robj = reloc->robj; | ||
193 | track->arrays[i + 0].esize = idx_value >> 8; | ||
194 | track->arrays[i + 0].esize &= 0x7F; | ||
195 | } | ||
196 | return r; | ||
197 | } | ||
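The hunk above replaces two large static inline helpers with plain declarations; their bodies move into a single .c file so the kernel image carries one copy instead of one per includer. In outline, a move like this looks as follows (file names and the helper are illustrative, not the real radeon code):

/* track.h - before: every .c file including this got its own copy
 *
 *   static inline int check_packet(struct parser *p) { ...50 lines... }
 *
 * after: a single shared declaration */
struct parser { int idx; };
int check_packet(struct parser *p);

/* track.c - the one out-of-line definition */
int check_packet(struct parser *p)
{
        return p->idx >= 0 ? 0 : -1;   /* stands in for the real body */
}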
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 55a7f190027e..33f2b68c680b 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -144,8 +144,9 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
145 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); | 145 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
146 | rv370_pcie_gart_tlb_flush(rdev); | 146 | rv370_pcie_gart_tlb_flush(rdev); |
147 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n", | 147 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
148 | (unsigned)(rdev->mc.gtt_size >> 20), table_addr); | 148 | (unsigned)(rdev->mc.gtt_size >> 20), |
149 | (unsigned long long)table_addr); | ||
149 | rdev->gart.ready = true; | 150 | rdev->gart.ready = true; |
150 | return 0; | 151 | return 0; |
151 | } | 152 | } |
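The format-string fix above matters because table_addr is a u64: printing it through %08X truncates on every architecture and trips the printk format checker. The portable idiom is %llx plus an explicit cast, since the underlying type of a 64-bit typedef differs between 32- and 64-bit builds. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t table_addr = 0x1234567890ULL;

        /* Cast to unsigned long long so the format matches no matter
         * how uint64_t is typedef'd on this architecture. */
        printf("PCIE GART table at 0x%016llX\n",
               (unsigned long long)table_addr);
        return 0;
}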
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index c5c2742e4140..1fe98b421c9b 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -791,7 +791,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, | |||
791 | /** | 791 | /** |
792 | * Emit the sequence to pacify R300. | 792 | * Emit the sequence to pacify R300. |
793 | */ | 793 | */ |
794 | static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) | 794 | static void r300_pacify(drm_radeon_private_t *dev_priv) |
795 | { | 795 | { |
796 | uint32_t cache_z, cache_3d, cache_2d; | 796 | uint32_t cache_z, cache_3d, cache_2d; |
797 | RING_LOCALS; | 797 | RING_LOCALS; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 720dd99163f8..12470b090ddf 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -993,6 +993,9 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
993 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | 993 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); |
994 | 994 | ||
995 | r600_pcie_gart_tlb_flush(rdev); | 995 | r600_pcie_gart_tlb_flush(rdev); |
996 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
997 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
998 | (unsigned long long)rdev->gart.table_addr); | ||
996 | rdev->gart.ready = true; | 999 | rdev->gart.ready = true; |
997 | return 0; | 1000 | return 0; |
998 | } | 1001 | } |
@@ -2362,19 +2365,33 @@ int r600_copy_blit(struct radeon_device *rdev, | |||
2362 | 2365 | ||
2363 | mutex_lock(&rdev->r600_blit.mutex); | 2366 | mutex_lock(&rdev->r600_blit.mutex); |
2364 | rdev->r600_blit.vb_ib = NULL; | 2367 | rdev->r600_blit.vb_ib = NULL; |
2365 | r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); | 2368 | r = r600_blit_prepare_copy(rdev, num_gpu_pages); |
2366 | if (r) { | 2369 | if (r) { |
2367 | if (rdev->r600_blit.vb_ib) | 2370 | if (rdev->r600_blit.vb_ib) |
2368 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 2371 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
2369 | mutex_unlock(&rdev->r600_blit.mutex); | 2372 | mutex_unlock(&rdev->r600_blit.mutex); |
2370 | return r; | 2373 | return r; |
2371 | } | 2374 | } |
2372 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); | 2375 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages); |
2373 | r600_blit_done_copy(rdev, fence); | 2376 | r600_blit_done_copy(rdev, fence); |
2374 | mutex_unlock(&rdev->r600_blit.mutex); | 2377 | mutex_unlock(&rdev->r600_blit.mutex); |
2375 | return 0; | 2378 | return 0; |
2376 | } | 2379 | } |
2377 | 2380 | ||
2381 | void r600_blit_suspend(struct radeon_device *rdev) | ||
2382 | { | ||
2383 | int r; | ||
2384 | |||
2385 | /* unpin shaders bo */ | ||
2386 | if (rdev->r600_blit.shader_obj) { | ||
2387 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
2388 | if (!r) { | ||
2389 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
2390 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
2391 | } | ||
2392 | } | ||
2393 | } | ||
2394 | |||
2378 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 2395 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
2379 | uint32_t tiling_flags, uint32_t pitch, | 2396 | uint32_t tiling_flags, uint32_t pitch, |
2380 | uint32_t offset, uint32_t obj_size) | 2397 | uint32_t offset, uint32_t obj_size) |
@@ -2494,8 +2511,6 @@ int r600_resume(struct radeon_device *rdev) | |||
2494 | 2511 | ||
2495 | int r600_suspend(struct radeon_device *rdev) | 2512 | int r600_suspend(struct radeon_device *rdev) |
2496 | { | 2513 | { |
2497 | int r; | ||
2498 | |||
2499 | r600_audio_fini(rdev); | 2514 | r600_audio_fini(rdev); |
2500 | /* FIXME: we should wait for ring to be empty */ | 2515 | /* FIXME: we should wait for ring to be empty */ |
2501 | r600_cp_stop(rdev); | 2516 | r600_cp_stop(rdev); |
@@ -2503,14 +2518,8 @@ int r600_suspend(struct radeon_device *rdev) | |||
2503 | r600_irq_suspend(rdev); | 2518 | r600_irq_suspend(rdev); |
2504 | radeon_wb_disable(rdev); | 2519 | radeon_wb_disable(rdev); |
2505 | r600_pcie_gart_disable(rdev); | 2520 | r600_pcie_gart_disable(rdev); |
2506 | /* unpin shaders bo */ | 2521 | r600_blit_suspend(rdev); |
2507 | if (rdev->r600_blit.shader_obj) { | 2522 | |
2508 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
2509 | if (!r) { | ||
2510 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
2511 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
2512 | } | ||
2513 | } | ||
2514 | return 0; | 2523 | return 0; |
2515 | } | 2524 | } |
2516 | 2525 | ||
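Factoring the unpin sequence into r600_blit_suspend() gives every suspend path the same reserve/unpin/unreserve dance on the blit shader buffer object. The shape of that dance, sketched with stub buffer-object helpers rather than the real radeon_bo API:

#include <stdio.h>

struct bo { int pinned; int reserved; };

static int  bo_reserve(struct bo *bo)   { bo->reserved = 1; return 0; }
static void bo_unpin(struct bo *bo)     { bo->pinned = 0; }
static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }

/* Same shape as r600_blit_suspend(): lock the object, drop its pin,
 * unlock - and silently skip the unpin if the lock cannot be taken. */
static void blit_suspend(struct bo *shader_obj)
{
        if (!shader_obj)
                return;                        /* blit never initialized */
        if (bo_reserve(shader_obj) == 0) {
                bo_unpin(shader_obj);
                bo_unreserve(shader_obj);
        }
}

int main(void)
{
        struct bo shader = { .pinned = 1 };
        blit_suspend(&shader);
        printf("pinned=%d\n", shader.pinned);  /* pinned=0 */
        return 0;
}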
@@ -3137,7 +3146,7 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3137 | return 0; | 3146 | return 0; |
3138 | } | 3147 | } |
3139 | 3148 | ||
3140 | static inline void r600_irq_ack(struct radeon_device *rdev) | 3149 | static void r600_irq_ack(struct radeon_device *rdev) |
3141 | { | 3150 | { |
3142 | u32 tmp; | 3151 | u32 tmp; |
3143 | 3152 | ||
@@ -3238,7 +3247,7 @@ void r600_irq_disable(struct radeon_device *rdev) | |||
3238 | r600_disable_interrupt_state(rdev); | 3247 | r600_disable_interrupt_state(rdev); |
3239 | } | 3248 | } |
3240 | 3249 | ||
3241 | static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | 3250 | static u32 r600_get_ih_wptr(struct radeon_device *rdev) |
3242 | { | 3251 | { |
3243 | u32 wptr, tmp; | 3252 | u32 wptr, tmp; |
3244 | 3253 | ||
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index 7f1043448d25..3c031a48205d 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #define COLOR_5_6_5 0x8 | 41 | #define COLOR_5_6_5 0x8 |
42 | #define COLOR_8_8_8_8 0x1a | 42 | #define COLOR_8_8_8_8 0x1a |
43 | 43 | ||
44 | static inline void | 44 | static void |
45 | set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr) | 45 | set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr) |
46 | { | 46 | { |
47 | u32 cb_color_info; | 47 | u32 cb_color_info; |
@@ -99,7 +99,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 | |||
99 | ADVANCE_RING(); | 99 | ADVANCE_RING(); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void | 102 | static void |
103 | cp_set_surface_sync(drm_radeon_private_t *dev_priv, | 103 | cp_set_surface_sync(drm_radeon_private_t *dev_priv, |
104 | u32 sync_type, u32 size, u64 mc_addr) | 104 | u32 sync_type, u32 size, u64 mc_addr) |
105 | { | 105 | { |
@@ -121,7 +121,7 @@ cp_set_surface_sync(drm_radeon_private_t *dev_priv, | |||
121 | ADVANCE_RING(); | 121 | ADVANCE_RING(); |
122 | } | 122 | } |
123 | 123 | ||
124 | static inline void | 124 | static void |
125 | set_shaders(struct drm_device *dev) | 125 | set_shaders(struct drm_device *dev) |
126 | { | 126 | { |
127 | drm_radeon_private_t *dev_priv = dev->dev_private; | 127 | drm_radeon_private_t *dev_priv = dev->dev_private; |
@@ -184,7 +184,7 @@ set_shaders(struct drm_device *dev) | |||
184 | R600_SH_ACTION_ENA, 512, gpu_addr); | 184 | R600_SH_ACTION_ENA, 512, gpu_addr); |
185 | } | 185 | } |
186 | 186 | ||
187 | static inline void | 187 | static void |
188 | set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) | 188 | set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) |
189 | { | 189 | { |
190 | uint32_t sq_vtx_constant_word2; | 190 | uint32_t sq_vtx_constant_word2; |
@@ -220,7 +220,7 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) | |||
220 | R600_VC_ACTION_ENA, 48, gpu_addr); | 220 | R600_VC_ACTION_ENA, 48, gpu_addr); |
221 | } | 221 | } |
222 | 222 | ||
223 | static inline void | 223 | static void |
224 | set_tex_resource(drm_radeon_private_t *dev_priv, | 224 | set_tex_resource(drm_radeon_private_t *dev_priv, |
225 | int format, int w, int h, int pitch, u64 gpu_addr) | 225 | int format, int w, int h, int pitch, u64 gpu_addr) |
226 | { | 226 | { |
@@ -258,7 +258,7 @@ set_tex_resource(drm_radeon_private_t *dev_priv, | |||
258 | 258 | ||
259 | } | 259 | } |
260 | 260 | ||
261 | static inline void | 261 | static void |
262 | set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2) | 262 | set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2) |
263 | { | 263 | { |
264 | RING_LOCALS; | 264 | RING_LOCALS; |
@@ -282,7 +282,7 @@ set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2) | |||
282 | ADVANCE_RING(); | 282 | ADVANCE_RING(); |
283 | } | 283 | } |
284 | 284 | ||
285 | static inline void | 285 | static void |
286 | draw_auto(drm_radeon_private_t *dev_priv) | 286 | draw_auto(drm_radeon_private_t *dev_priv) |
287 | { | 287 | { |
288 | RING_LOCALS; | 288 | RING_LOCALS; |
@@ -311,7 +311,7 @@ draw_auto(drm_radeon_private_t *dev_priv) | |||
311 | COMMIT_RING(); | 311 | COMMIT_RING(); |
312 | } | 312 | } |
313 | 313 | ||
314 | static inline void | 314 | static void |
315 | set_default_state(drm_radeon_private_t *dev_priv) | 315 | set_default_state(drm_radeon_private_t *dev_priv) |
316 | { | 316 | { |
317 | int i; | 317 | int i; |
@@ -489,7 +489,7 @@ set_default_state(drm_radeon_private_t *dev_priv) | |||
489 | ADVANCE_RING(); | 489 | ADVANCE_RING(); |
490 | } | 490 | } |
491 | 491 | ||
492 | static inline uint32_t i2f(uint32_t input) | 492 | static uint32_t i2f(uint32_t input) |
493 | { | 493 | { |
494 | u32 result, i, exponent, fraction; | 494 | u32 result, i, exponent, fraction; |
495 | 495 | ||
@@ -515,7 +515,7 @@ static inline uint32_t i2f(uint32_t input) | |||
515 | } | 515 | } |
516 | 516 | ||
517 | 517 | ||
518 | static inline int r600_nomm_get_vb(struct drm_device *dev) | 518 | static int r600_nomm_get_vb(struct drm_device *dev) |
519 | { | 519 | { |
520 | drm_radeon_private_t *dev_priv = dev->dev_private; | 520 | drm_radeon_private_t *dev_priv = dev->dev_private; |
521 | dev_priv->blit_vb = radeon_freelist_get(dev); | 521 | dev_priv->blit_vb = radeon_freelist_get(dev); |
@@ -526,7 +526,7 @@ static inline int r600_nomm_get_vb(struct drm_device *dev) | |||
526 | return 0; | 526 | return 0; |
527 | } | 527 | } |
528 | 528 | ||
529 | static inline void r600_nomm_put_vb(struct drm_device *dev) | 529 | static void r600_nomm_put_vb(struct drm_device *dev) |
530 | { | 530 | { |
531 | drm_radeon_private_t *dev_priv = dev->dev_private; | 531 | drm_radeon_private_t *dev_priv = dev->dev_private; |
532 | 532 | ||
@@ -534,7 +534,7 @@ static inline void r600_nomm_put_vb(struct drm_device *dev) | |||
534 | radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb); | 534 | radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb); |
535 | } | 535 | } |
536 | 536 | ||
537 | static inline void *r600_nomm_get_vb_ptr(struct drm_device *dev) | 537 | static void *r600_nomm_get_vb_ptr(struct drm_device *dev) |
538 | { | 538 | { |
539 | drm_radeon_private_t *dev_priv = dev->dev_private; | 539 | drm_radeon_private_t *dev_priv = dev->dev_private; |
540 | return (((char *)dev->agp_buffer_map->handle + | 540 | return (((char *)dev->agp_buffer_map->handle + |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 9aa74c3f8cb6..c4cf1308d4a1 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -42,6 +42,9 @@ | |||
42 | #define COLOR_5_6_5 0x8 | 42 | #define COLOR_5_6_5 0x8 |
43 | #define COLOR_8_8_8_8 0x1a | 43 | #define COLOR_8_8_8_8 0x1a |
44 | 44 | ||
45 | #define RECT_UNIT_H 32 | ||
46 | #define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H) | ||
47 | |||
45 | /* emits 21 on rv770+, 23 on r600 */ | 48 | /* emits 21 on rv770+, 23 on r600 */ |
46 | static void | 49 | static void |
47 | set_render_target(struct radeon_device *rdev, int format, | 50 | set_render_target(struct radeon_device *rdev, int format, |
@@ -54,7 +57,9 @@ set_render_target(struct radeon_device *rdev, int format, | |||
54 | if (h < 8) | 57 | if (h < 8) |
55 | h = 8; | 58 | h = 8; |
56 | 59 | ||
57 | cb_color_info = ((format << 2) | (1 << 27) | (1 << 8)); | 60 | cb_color_info = CB_FORMAT(format) | |
61 | CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | | ||
62 | CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
58 | pitch = (w / 8) - 1; | 63 | pitch = (w / 8) - 1; |
59 | slice = ((w * h) / 64) - 1; | 64 | slice = ((w * h) / 64) - 1; |
60 | 65 | ||
@@ -164,9 +169,10 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
164 | { | 169 | { |
165 | u32 sq_vtx_constant_word2; | 170 | u32 sq_vtx_constant_word2; |
166 | 171 | ||
167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 172 | sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | |
173 | SQ_VTXC_STRIDE(16); | ||
168 | #ifdef __BIG_ENDIAN | 174 | #ifdef __BIG_ENDIAN |
169 | sq_vtx_constant_word2 |= (2 << 30); | 175 | sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); |
170 | #endif | 176 | #endif |
171 | 177 | ||
172 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); | 178 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); |
@@ -202,18 +208,19 @@ set_tex_resource(struct radeon_device *rdev, | |||
202 | if (h < 1) | 208 | if (h < 1) |
203 | h = 1; | 209 | h = 1; |
204 | 210 | ||
205 | sq_tex_resource_word0 = (1 << 0) | (1 << 3); | 211 | sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) | |
206 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | | 212 | S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); |
207 | ((w - 1) << 19)); | 213 | sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) | |
214 | S_038000_TEX_WIDTH(w - 1); | ||
208 | 215 | ||
209 | sq_tex_resource_word1 = (format << 26); | 216 | sq_tex_resource_word1 = S_038004_DATA_FORMAT(format); |
210 | sq_tex_resource_word1 |= ((h - 1) << 0); | 217 | sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1); |
211 | 218 | ||
212 | sq_tex_resource_word4 = ((1 << 14) | | 219 | sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) | |
213 | (0 << 16) | | 220 | S_038010_DST_SEL_X(SQ_SEL_X) | |
214 | (1 << 19) | | 221 | S_038010_DST_SEL_Y(SQ_SEL_Y) | |
215 | (2 << 22) | | 222 | S_038010_DST_SEL_Z(SQ_SEL_Z) | |
216 | (3 << 25)); | 223 | S_038010_DST_SEL_W(SQ_SEL_W); |
217 | 224 | ||
218 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); | 225 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); |
219 | radeon_ring_write(rdev, 0); | 226 | radeon_ring_write(rdev, 0); |
@@ -450,7 +457,7 @@ set_default_state(struct radeon_device *rdev) | |||
450 | radeon_ring_write(rdev, sq_stack_resource_mgmt_2); | 457 | radeon_ring_write(rdev, sq_stack_resource_mgmt_2); |
451 | } | 458 | } |
452 | 459 | ||
453 | static inline uint32_t i2f(uint32_t input) | 460 | static uint32_t i2f(uint32_t input) |
454 | { | 461 | { |
455 | u32 result, i, exponent, fraction; | 462 | u32 result, i, exponent, fraction; |
456 | 463 | ||
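i2f() turns an unsigned integer into the bit pattern of an IEEE-754 single-precision float, because the blit path needs float vertex coordinates but kernel code cannot use the FPU. A userspace sketch of the same conversion (mantissa truncated, matching the driver's behaviour for values above 2^24):

#include <stdio.h>
#include <stdint.h>

/* Build the single-precision bit pattern for an unsigned integer
 * without touching the FPU. */
static uint32_t i2f(uint32_t input)
{
        uint32_t exponent, fraction;
        int msb;

        if (input == 0)
                return 0;
        /* Find the most significant set bit. */
        for (msb = 31; !(input & (1u << msb)); msb--)
                ;
        exponent = 127 + msb;                  /* biased exponent */
        if (msb > 23)
                fraction = (input >> (msb - 23)) & 0x7fffff;  /* truncate */
        else
                fraction = (input << (23 - msb)) & 0x7fffff;
        return (exponent << 23) | fraction;
}

int main(void)
{
        printf("i2f(1)  = 0x%08x\n", i2f(1));   /* 0x3f800000 == 1.0f  */
        printf("i2f(32) = 0x%08x\n", i2f(32));  /* 0x42000000 == 32.0f */
        return 0;
}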
@@ -483,6 +490,27 @@ int r600_blit_init(struct radeon_device *rdev) | |||
483 | u32 packet2s[16]; | 490 | u32 packet2s[16]; |
484 | int num_packet2s = 0; | 491 | int num_packet2s = 0; |
485 | 492 | ||
493 | rdev->r600_blit.primitives.set_render_target = set_render_target; | ||
494 | rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; | ||
495 | rdev->r600_blit.primitives.set_shaders = set_shaders; | ||
496 | rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; | ||
497 | rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; | ||
498 | rdev->r600_blit.primitives.set_scissors = set_scissors; | ||
499 | rdev->r600_blit.primitives.draw_auto = draw_auto; | ||
500 | rdev->r600_blit.primitives.set_default_state = set_default_state; | ||
501 | |||
502 | rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ | ||
503 | rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ | ||
504 | rdev->r600_blit.ring_size_common += 5; /* done copy */ | ||
505 | rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ | ||
506 | |||
507 | rdev->r600_blit.ring_size_per_loop = 76; | ||
508 | /* set_render_target emits 2 extra dwords on rv6xx */ | ||
509 | if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) | ||
510 | rdev->r600_blit.ring_size_per_loop += 2; | ||
511 | |||
512 | rdev->r600_blit.max_dim = 8192; | ||
513 | |||
486 | /* pin copy shader into vram if already initialized */ | 514 | /* pin copy shader into vram if already initialized */ |
487 | if (rdev->r600_blit.shader_obj) | 515 | if (rdev->r600_blit.shader_obj) |
488 | goto done; | 516 | goto done; |
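With this change the ring-space arithmetic lives in one place: a fixed common cost (default state and shaders, plus the fences and the done-copy packet) and a per-rectangle loop cost, recorded at init and consumed by r600_blit_prepare_copy(). The reservation for a copy needing N loops is then plain arithmetic, e.g.:

#include <stdio.h>

int main(void)
{
        /* Values mirrored from the r600 path above. */
        int common   = 40 + 10 + 5 + 10;  /* state+shaders, VB fence, done, fence */
        int per_loop = 76 + 2;            /* rv6xx emits 2 extra dwords  */
        int loops    = 3;

        printf("ring dwords needed: %d\n", common + loops * per_loop);
        return 0;
}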
@@ -600,47 +628,80 @@ static void r600_vb_ib_put(struct radeon_device *rdev) | |||
600 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 628 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
601 | } | 629 | } |
602 | 630 | ||
603 | int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | 631 | static unsigned r600_blit_create_rect(unsigned num_gpu_pages, |
632 | int *width, int *height, int max_dim) | ||
633 | { | ||
634 | unsigned max_pages; | ||
635 | unsigned pages = num_gpu_pages; | ||
636 | int w, h; | ||
637 | |||
638 | if (num_gpu_pages == 0) { | ||
639 | /* not supposed to be called with no pages, but just in case */ | ||
640 | h = 0; | ||
641 | w = 0; | ||
642 | pages = 0; | ||
643 | WARN_ON(1); | ||
644 | } else { | ||
645 | int rect_order = 2; | ||
646 | h = RECT_UNIT_H; | ||
647 | while (num_gpu_pages / rect_order) { | ||
648 | h *= 2; | ||
649 | rect_order *= 4; | ||
650 | if (h >= max_dim) { | ||
651 | h = max_dim; | ||
652 | break; | ||
653 | } | ||
654 | } | ||
655 | max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H); | ||
656 | if (pages > max_pages) | ||
657 | pages = max_pages; | ||
658 | w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h; | ||
659 | w = (w / RECT_UNIT_W) * RECT_UNIT_W; | ||
660 | pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H); | ||
661 | BUG_ON(pages == 0); | ||
662 | } | ||
663 | |||
664 | |||
665 | DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages); | ||
666 | |||
667 | /* return width and height only if the caller wants it */ | ||
668 | if (height) | ||
669 | *height = h; | ||
670 | if (width) | ||
671 | *width = w; | ||
672 | |||
673 | return pages; | ||
674 | } | ||
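The RECT_UNIT_W/RECT_UNIT_H constants make one 32x32 rectangle unit cover exactly one GPU page at 4 bytes per pixel (4096 / 4 / 32 = 32), so r600_blit_create_rect() can grow the rectangle height in powers of two and trim the width while keeping the area a whole number of pages. A quick check of that invariant:

#include <stdio.h>
#include <assert.h>

#define GPU_PAGE_SIZE 4096
#define RECT_UNIT_H   32
#define RECT_UNIT_W   (GPU_PAGE_SIZE / 4 / RECT_UNIT_H)   /* = 32 */

int main(void)
{
        /* One unit rectangle is 32x32 pixels at 4 bytes each: one page. */
        assert(RECT_UNIT_W * RECT_UNIT_H * 4 == GPU_PAGE_SIZE);

        /* A 1024x64 rectangle therefore covers (1024/32)*(64/32) pages. */
        int w = 1024, h = 64;
        printf("pages covered: %d\n",
               (w * h) / (RECT_UNIT_W * RECT_UNIT_H));    /* 64 */
        return 0;
}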
675 | |||
676 | |||
677 | int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) | ||
604 | { | 678 | { |
605 | int r; | 679 | int r; |
606 | int ring_size, line_size; | 680 | int ring_size; |
607 | int max_size; | 681 | int num_loops = 0; |
608 | /* loops of emits 64 + fence emit possible */ | 682 | int dwords_per_loop = rdev->r600_blit.ring_size_per_loop; |
609 | int dwords_per_loop = 76, num_loops; | ||
610 | 683 | ||
611 | r = r600_vb_ib_get(rdev); | 684 | r = r600_vb_ib_get(rdev); |
612 | if (r) | 685 | if (r) |
613 | return r; | 686 | return r; |
614 | 687 | ||
615 | /* set_render_target emits 2 extra dwords on rv6xx */ | 688 | /* num loops */ |
616 | if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) | 689 | while (num_gpu_pages) { |
617 | dwords_per_loop += 2; | 690 | num_gpu_pages -= |
618 | 691 | r600_blit_create_rect(num_gpu_pages, NULL, NULL, | |
619 | /* 8 bpp vs 32 bpp for xfer unit */ | 692 | rdev->r600_blit.max_dim); |
620 | if (size_bytes & 3) | 693 | num_loops++; |
621 | line_size = 8192; | 694 | } |
622 | else | ||
623 | line_size = 8192*4; | ||
624 | |||
625 | max_size = 8192 * line_size; | ||
626 | 695 | ||
627 | /* major loops cover the max size transfer */ | ||
628 | num_loops = ((size_bytes + max_size) / max_size); | ||
629 | /* minor loops cover the extra non aligned bits */ | ||
630 | num_loops += ((size_bytes % line_size) ? 1 : 0); | ||
631 | /* calculate number of loops correctly */ | 696 | /* calculate number of loops correctly */ |
632 | ring_size = num_loops * dwords_per_loop; | 697 | ring_size = num_loops * dwords_per_loop; |
633 | /* set default + shaders */ | 698 | ring_size += rdev->r600_blit.ring_size_common; |
634 | ring_size += 40; /* shaders + def state */ | ||
635 | ring_size += 10; /* fence emit for VB IB */ | ||
636 | ring_size += 5; /* done copy */ | ||
637 | ring_size += 10; /* fence emit for done copy */ | ||
638 | r = radeon_ring_lock(rdev, ring_size); | 699 | r = radeon_ring_lock(rdev, ring_size); |
639 | if (r) | 700 | if (r) |
640 | return r; | 701 | return r; |
641 | 702 | ||
642 | set_default_state(rdev); /* 14 */ | 703 | rdev->r600_blit.primitives.set_default_state(rdev); |
643 | set_shaders(rdev); /* 26 */ | 704 | rdev->r600_blit.primitives.set_shaders(rdev); |
644 | return 0; | 705 | return 0; |
645 | } | 706 | } |
646 | 707 | ||
@@ -659,182 +720,64 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | |||
659 | 720 | ||
660 | void r600_kms_blit_copy(struct radeon_device *rdev, | 721 | void r600_kms_blit_copy(struct radeon_device *rdev, |
661 | u64 src_gpu_addr, u64 dst_gpu_addr, | 722 | u64 src_gpu_addr, u64 dst_gpu_addr, |
662 | int size_bytes) | 723 | unsigned num_gpu_pages) |
663 | { | 724 | { |
664 | int max_bytes; | ||
665 | u64 vb_gpu_addr; | 725 | u64 vb_gpu_addr; |
666 | u32 *vb; | 726 | u32 *vb; |
667 | 727 | ||
668 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, | 728 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", |
669 | size_bytes, rdev->r600_blit.vb_used); | 729 | src_gpu_addr, dst_gpu_addr, |
730 | num_gpu_pages, rdev->r600_blit.vb_used); | ||
670 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | 731 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); |
671 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | ||
672 | max_bytes = 8192; | ||
673 | |||
674 | while (size_bytes) { | ||
675 | int cur_size = size_bytes; | ||
676 | int src_x = src_gpu_addr & 255; | ||
677 | int dst_x = dst_gpu_addr & 255; | ||
678 | int h = 1; | ||
679 | src_gpu_addr = src_gpu_addr & ~255ULL; | ||
680 | dst_gpu_addr = dst_gpu_addr & ~255ULL; | ||
681 | |||
682 | if (!src_x && !dst_x) { | ||
683 | h = (cur_size / max_bytes); | ||
684 | if (h > 8192) | ||
685 | h = 8192; | ||
686 | if (h == 0) | ||
687 | h = 1; | ||
688 | else | ||
689 | cur_size = max_bytes; | ||
690 | } else { | ||
691 | if (cur_size > max_bytes) | ||
692 | cur_size = max_bytes; | ||
693 | if (cur_size > (max_bytes - dst_x)) | ||
694 | cur_size = (max_bytes - dst_x); | ||
695 | if (cur_size > (max_bytes - src_x)) | ||
696 | cur_size = (max_bytes - src_x); | ||
697 | } | ||
698 | |||
699 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
700 | WARN_ON(1); | ||
701 | } | ||
702 | |||
703 | vb[0] = i2f(dst_x); | ||
704 | vb[1] = 0; | ||
705 | vb[2] = i2f(src_x); | ||
706 | vb[3] = 0; | ||
707 | |||
708 | vb[4] = i2f(dst_x); | ||
709 | vb[5] = i2f(h); | ||
710 | vb[6] = i2f(src_x); | ||
711 | vb[7] = i2f(h); | ||
712 | |||
713 | vb[8] = i2f(dst_x + cur_size); | ||
714 | vb[9] = i2f(h); | ||
715 | vb[10] = i2f(src_x + cur_size); | ||
716 | vb[11] = i2f(h); | ||
717 | |||
718 | /* src 9 */ | ||
719 | set_tex_resource(rdev, FMT_8, | ||
720 | src_x + cur_size, h, src_x + cur_size, | ||
721 | src_gpu_addr); | ||
722 | |||
723 | /* 5 */ | ||
724 | cp_set_surface_sync(rdev, | ||
725 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
726 | 732 | ||
727 | /* dst 23 */ | 733 | while (num_gpu_pages) { |
728 | set_render_target(rdev, COLOR_8, | 734 | int w, h; |
729 | dst_x + cur_size, h, | 735 | unsigned size_in_bytes; |
730 | dst_gpu_addr); | 736 | unsigned pages_per_loop = |
737 | r600_blit_create_rect(num_gpu_pages, &w, &h, | ||
738 | rdev->r600_blit.max_dim); | ||
731 | 739 | ||
732 | /* scissors 12 */ | 740 | size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE; |
733 | set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); | 741 | DRM_DEBUG("rectangle w=%d h=%d\n", w, h); |
734 | 742 | ||
735 | /* 14 */ | 743 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { |
736 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | 744 | WARN_ON(1); |
737 | set_vtx_resource(rdev, vb_gpu_addr); | ||
738 | |||
739 | /* draw 10 */ | ||
740 | draw_auto(rdev); | ||
741 | |||
742 | /* 5 */ | ||
743 | cp_set_surface_sync(rdev, | ||
744 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
745 | cur_size * h, dst_gpu_addr); | ||
746 | |||
747 | vb += 12; | ||
748 | rdev->r600_blit.vb_used += 12 * 4; | ||
749 | |||
750 | src_gpu_addr += cur_size * h; | ||
751 | dst_gpu_addr += cur_size * h; | ||
752 | size_bytes -= cur_size * h; | ||
753 | } | 745 | } |
754 | } else { | ||
755 | max_bytes = 8192 * 4; | ||
756 | |||
757 | while (size_bytes) { | ||
758 | int cur_size = size_bytes; | ||
759 | int src_x = (src_gpu_addr & 255); | ||
760 | int dst_x = (dst_gpu_addr & 255); | ||
761 | int h = 1; | ||
762 | src_gpu_addr = src_gpu_addr & ~255ULL; | ||
763 | dst_gpu_addr = dst_gpu_addr & ~255ULL; | ||
764 | |||
765 | if (!src_x && !dst_x) { | ||
766 | h = (cur_size / max_bytes); | ||
767 | if (h > 8192) | ||
768 | h = 8192; | ||
769 | if (h == 0) | ||
770 | h = 1; | ||
771 | else | ||
772 | cur_size = max_bytes; | ||
773 | } else { | ||
774 | if (cur_size > max_bytes) | ||
775 | cur_size = max_bytes; | ||
776 | if (cur_size > (max_bytes - dst_x)) | ||
777 | cur_size = (max_bytes - dst_x); | ||
778 | if (cur_size > (max_bytes - src_x)) | ||
779 | cur_size = (max_bytes - src_x); | ||
780 | } | ||
781 | |||
782 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
783 | WARN_ON(1); | ||
784 | } | ||
785 | 746 | ||
786 | vb[0] = i2f(dst_x / 4); | 747 | vb[0] = 0; |
787 | vb[1] = 0; | 748 | vb[1] = 0; |
788 | vb[2] = i2f(src_x / 4); | 749 | vb[2] = 0; |
789 | vb[3] = 0; | 750 | vb[3] = 0; |
790 | 751 | ||
791 | vb[4] = i2f(dst_x / 4); | 752 | vb[4] = 0; |
792 | vb[5] = i2f(h); | 753 | vb[5] = i2f(h); |
793 | vb[6] = i2f(src_x / 4); | 754 | vb[6] = 0; |
794 | vb[7] = i2f(h); | 755 | vb[7] = i2f(h); |
795 | 756 | ||
796 | vb[8] = i2f((dst_x + cur_size) / 4); | 757 | vb[8] = i2f(w); |
797 | vb[9] = i2f(h); | 758 | vb[9] = i2f(h); |
798 | vb[10] = i2f((src_x + cur_size) / 4); | 759 | vb[10] = i2f(w); |
799 | vb[11] = i2f(h); | 760 | vb[11] = i2f(h); |
800 | 761 | ||
801 | /* src 9 */ | 762 | rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, |
802 | set_tex_resource(rdev, FMT_8_8_8_8, | 763 | w, h, w, src_gpu_addr); |
803 | (src_x + cur_size) / 4, | 764 | rdev->r600_blit.primitives.cp_set_surface_sync(rdev, |
804 | h, (src_x + cur_size) / 4, | 765 | PACKET3_TC_ACTION_ENA, |
805 | src_gpu_addr); | 766 | size_in_bytes, src_gpu_addr); |
806 | /* 5 */ | 767 | rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, |
807 | cp_set_surface_sync(rdev, | 768 | w, h, dst_gpu_addr); |
808 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | 769 | rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); |
809 | 770 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | |
810 | /* dst 23 */ | 771 | rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr); |
811 | set_render_target(rdev, COLOR_8_8_8_8, | 772 | rdev->r600_blit.primitives.draw_auto(rdev); |
812 | (dst_x + cur_size) / 4, h, | 773 | rdev->r600_blit.primitives.cp_set_surface_sync(rdev, |
813 | dst_gpu_addr); | 774 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, |
814 | 775 | size_in_bytes, dst_gpu_addr); | |
815 | /* scissors 12 */ | 776 | |
816 | set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h); | 777 | vb += 12; |
817 | 778 | rdev->r600_blit.vb_used += 4*12; | |
818 | /* Vertex buffer setup 14 */ | 779 | src_gpu_addr += size_in_bytes; |
819 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | 780 | dst_gpu_addr += size_in_bytes; |
820 | set_vtx_resource(rdev, vb_gpu_addr); | 781 | num_gpu_pages -= pages_per_loop; |
821 | |||
822 | /* draw 10 */ | ||
823 | draw_auto(rdev); | ||
824 | |||
825 | /* 5 */ | ||
826 | cp_set_surface_sync(rdev, | ||
827 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
828 | cur_size * h, dst_gpu_addr); | ||
829 | |||
830 | /* 78 ring dwords per loop */ | ||
831 | vb += 12; | ||
832 | rdev->r600_blit.vb_used += 12 * 4; | ||
833 | |||
834 | src_gpu_addr += cur_size * h; | ||
835 | dst_gpu_addr += cur_size * h; | ||
836 | size_bytes -= cur_size * h; | ||
837 | } | ||
838 | } | 782 | } |
839 | } | 783 | } |
840 | |||
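Each pass of the loop above emits exactly 12 dwords of vertex data: three (x, y, s, t) vertices in i2f-encoded floats, which is all a hardware rectangle primitive needs since the fourth corner is implied. A sketch of that layout, using the FPU in place of i2f() since this runs in userspace:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Userspace stand-in for the driver's i2f(). */
static uint32_t i2f(uint32_t v)
{
        float f = (float)v;
        uint32_t bits;
        memcpy(&bits, &f, sizeof(bits));
        return bits;
}

/* Three (x, y, s, t) vertices describe the whole blit rectangle. */
static void fill_rect_vb(uint32_t *vb, int w, int h)
{
        vb[0] = i2f(0); vb[1] = i2f(0); vb[2]  = i2f(0); vb[3]  = i2f(0);
        vb[4] = i2f(0); vb[5] = i2f(h); vb[6]  = i2f(0); vb[7]  = i2f(h);
        vb[8] = i2f(w); vb[9] = i2f(h); vb[10] = i2f(w); vb[11] = i2f(h);
}

int main(void)
{
        uint32_t vb[12];
        fill_rect_vb(vb, 8192, 32);
        printf("vb[8]=0x%08x (8192.0f)\n", vb[8]);
        return 0;
}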
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index cf83aa05a684..0a2e023c1557 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -162,7 +162,7 @@ static const struct gpu_formats color_formats_table[] = { | |||
162 | [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, | 162 | [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, |
163 | }; | 163 | }; |
164 | 164 | ||
165 | static inline bool fmt_is_valid_color(u32 format) | 165 | static bool fmt_is_valid_color(u32 format) |
166 | { | 166 | { |
167 | if (format >= ARRAY_SIZE(color_formats_table)) | 167 | if (format >= ARRAY_SIZE(color_formats_table)) |
168 | return false; | 168 | return false; |
@@ -173,7 +173,7 @@ static inline bool fmt_is_valid_color(u32 format) | |||
173 | return false; | 173 | return false; |
174 | } | 174 | } |
175 | 175 | ||
176 | static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family) | 176 | static bool fmt_is_valid_texture(u32 format, enum radeon_family family) |
177 | { | 177 | { |
178 | if (format >= ARRAY_SIZE(color_formats_table)) | 178 | if (format >= ARRAY_SIZE(color_formats_table)) |
179 | return false; | 179 | return false; |
@@ -187,7 +187,7 @@ static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family) | |||
187 | return false; | 187 | return false; |
188 | } | 188 | } |
189 | 189 | ||
190 | static inline int fmt_get_blocksize(u32 format) | 190 | static int fmt_get_blocksize(u32 format) |
191 | { | 191 | { |
192 | if (format >= ARRAY_SIZE(color_formats_table)) | 192 | if (format >= ARRAY_SIZE(color_formats_table)) |
193 | return 0; | 193 | return 0; |
@@ -195,7 +195,7 @@ static inline int fmt_get_blocksize(u32 format) | |||
195 | return color_formats_table[format].blocksize; | 195 | return color_formats_table[format].blocksize; |
196 | } | 196 | } |
197 | 197 | ||
198 | static inline int fmt_get_nblocksx(u32 format, u32 w) | 198 | static int fmt_get_nblocksx(u32 format, u32 w) |
199 | { | 199 | { |
200 | unsigned bw; | 200 | unsigned bw; |
201 | 201 | ||
@@ -209,7 +209,7 @@ static inline int fmt_get_nblocksx(u32 format, u32 w) | |||
209 | return (w + bw - 1) / bw; | 209 | return (w + bw - 1) / bw; |
210 | } | 210 | } |
211 | 211 | ||
212 | static inline int fmt_get_nblocksy(u32 format, u32 h) | 212 | static int fmt_get_nblocksy(u32 format, u32 h) |
213 | { | 213 | { |
214 | unsigned bh; | 214 | unsigned bh; |
215 | 215 | ||
@@ -223,25 +223,6 @@ static inline int fmt_get_nblocksy(u32 format, u32 h) | |||
223 | return (h + bh - 1) / bh; | 223 | return (h + bh - 1) / bh; |
224 | } | 224 | } |
225 | 225 | ||
226 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) | ||
227 | { | ||
228 | unsigned res; | ||
229 | |||
230 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
231 | goto fail; | ||
232 | |||
233 | res = color_formats_table[format].blocksize; | ||
234 | if (res == 0) | ||
235 | goto fail; | ||
236 | |||
237 | *bpe = res; | ||
238 | return 0; | ||
239 | |||
240 | fail: | ||
241 | *bpe = 16; | ||
242 | return -EINVAL; | ||
243 | } | ||
244 | |||
245 | struct array_mode_checker { | 226 | struct array_mode_checker { |
246 | int array_mode; | 227 | int array_mode; |
247 | u32 group_size; | 228 | u32 group_size; |
@@ -252,7 +233,7 @@ struct array_mode_checker { | |||
252 | }; | 233 | }; |
253 | 234 | ||
254 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ | 235 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ |
255 | static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, | 236 | static int r600_get_array_mode_alignment(struct array_mode_checker *values, |
256 | u32 *pitch_align, | 237 | u32 *pitch_align, |
257 | u32 *height_align, | 238 | u32 *height_align, |
258 | u32 *depth_align, | 239 | u32 *depth_align, |
@@ -331,7 +312,7 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
331 | track->db_depth_control = 0xFFFFFFFF; | 312 | track->db_depth_control = 0xFFFFFFFF; |
332 | } | 313 | } |
333 | 314 | ||
334 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | 315 | static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) |
335 | { | 316 | { |
336 | struct r600_cs_track *track = p->track; | 317 | struct r600_cs_track *track = p->track; |
337 | u32 slice_tile_max, size, tmp; | 318 | u32 slice_tile_max, size, tmp; |
@@ -737,7 +718,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
737 | * Check next packet is relocation packet3, do bo validation and compute | 718 | * Check next packet is relocation packet3, do bo validation and compute |
738 | * GPU offset using the provided start. | 719 | * GPU offset using the provided start. |
739 | **/ | 720 | **/ |
740 | static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) | 721 | static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) |
741 | { | 722 | { |
742 | struct radeon_cs_packet p3reloc; | 723 | struct radeon_cs_packet p3reloc; |
743 | int r; | 724 | int r; |
@@ -911,7 +892,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p, | |||
911 | * if register is safe. If register is not flagged as safe this function | 892 | * if register is safe. If register is not flagged as safe this function |
912 | * will test it against a list of registers needing special handling. | 893 | * will test it against a list of registers needing special handling. |
913 | */ | 894 | */ |
914 | static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | 895 | static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
915 | { | 896 | { |
916 | struct r600_cs_track *track = (struct r600_cs_track *)p->track; | 897 | struct r600_cs_track *track = (struct r600_cs_track *)p->track; |
917 | struct radeon_cs_reloc *reloc; | 898 | struct radeon_cs_reloc *reloc; |
@@ -1215,7 +1196,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
1215 | return 0; | 1196 | return 0; |
1216 | } | 1197 | } |
1217 | 1198 | ||
1218 | static inline unsigned mip_minify(unsigned size, unsigned level) | 1199 | static unsigned mip_minify(unsigned size, unsigned level) |
1219 | { | 1200 | { |
1220 | unsigned val; | 1201 | unsigned val; |
1221 | 1202 | ||
@@ -1285,7 +1266,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, | |||
1285 | * This function will check that the resource has valid field and that | 1266 | * This function will check that the resource has valid field and that |
1286 | * the texture and mipmap bo object are big enough to cover this resource. | 1267 | * the texture and mipmap bo object are big enough to cover this resource. |
1287 | */ | 1268 | */ |
1288 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | 1269 | static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, |
1289 | struct radeon_bo *texture, | 1270 | struct radeon_bo *texture, |
1290 | struct radeon_bo *mipmap, | 1271 | struct radeon_bo *mipmap, |
1291 | u64 base_offset, | 1272 | u64 base_offset, |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 0245ae6c204e..bfe1b5d92afe 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -79,6 +79,11 @@ | |||
79 | #define CB_COLOR0_SIZE 0x28060 | 79 | #define CB_COLOR0_SIZE 0x28060 |
80 | #define CB_COLOR0_VIEW 0x28080 | 80 | #define CB_COLOR0_VIEW 0x28080 |
81 | #define CB_COLOR0_INFO 0x280a0 | 81 | #define CB_COLOR0_INFO 0x280a0 |
82 | # define CB_FORMAT(x) ((x) << 2) | ||
83 | # define CB_ARRAY_MODE(x) ((x) << 8) | ||
84 | # define CB_SOURCE_FORMAT(x) ((x) << 27) | ||
85 | # define CB_SF_EXPORT_FULL 0 | ||
86 | # define CB_SF_EXPORT_NORM 1 | ||
82 | #define CB_COLOR0_TILE 0x280c0 | 87 | #define CB_COLOR0_TILE 0x280c0 |
83 | #define CB_COLOR0_FRAG 0x280e0 | 88 | #define CB_COLOR0_FRAG 0x280e0 |
84 | #define CB_COLOR0_MASK 0x28100 | 89 | #define CB_COLOR0_MASK 0x28100 |
@@ -417,6 +422,17 @@ | |||
417 | #define SQ_PGM_START_VS 0x28858 | 422 | #define SQ_PGM_START_VS 0x28858 |
418 | #define SQ_PGM_RESOURCES_VS 0x28868 | 423 | #define SQ_PGM_RESOURCES_VS 0x28868 |
419 | #define SQ_PGM_CF_OFFSET_VS 0x288d0 | 424 | #define SQ_PGM_CF_OFFSET_VS 0x288d0 |
425 | |||
426 | #define SQ_VTX_CONSTANT_WORD0_0 0x30000 | ||
427 | #define SQ_VTX_CONSTANT_WORD1_0 0x30004 | ||
428 | #define SQ_VTX_CONSTANT_WORD2_0 0x30008 | ||
429 | # define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0) | ||
430 | # define SQ_VTXC_STRIDE(x) ((x) << 8) | ||
431 | # define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30) | ||
432 | # define SQ_ENDIAN_NONE 0 | ||
433 | # define SQ_ENDIAN_8IN16 1 | ||
434 | # define SQ_ENDIAN_8IN32 2 | ||
435 | #define SQ_VTX_CONSTANT_WORD3_0 0x3000c | ||
420 | #define SQ_VTX_CONSTANT_WORD6_0 0x38018 | 436 | #define SQ_VTX_CONSTANT_WORD6_0 0x38018 |
421 | #define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30) | 437 | #define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30) |
422 | #define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3) | 438 | #define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3) |
@@ -1352,6 +1368,12 @@ | |||
1352 | #define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25) | 1368 | #define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25) |
1353 | #define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7) | 1369 | #define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7) |
1354 | #define C_038010_DST_SEL_W 0xF1FFFFFF | 1370 | #define C_038010_DST_SEL_W 0xF1FFFFFF |
1371 | # define SQ_SEL_X 0 | ||
1372 | # define SQ_SEL_Y 1 | ||
1373 | # define SQ_SEL_Z 2 | ||
1374 | # define SQ_SEL_W 3 | ||
1375 | # define SQ_SEL_0 4 | ||
1376 | # define SQ_SEL_1 5 | ||
1355 | #define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28) | 1377 | #define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28) |
1356 | #define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF) | 1378 | #define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF) |
1357 | #define C_038010_BASE_LEVEL 0x0FFFFFFF | 1379 | #define C_038010_BASE_LEVEL 0x0FFFFFFF |
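The new r600d.h macros let the blit code spell out register fields by name instead of raw shifts; the resulting values are bit-for-bit identical. A quick demonstration with the SQ_VTX_CONSTANT_WORD2 fields (redefined locally so the snippet stands alone):

#include <stdio.h>
#include <stdint.h>

/* Local copies of the field macros above. */
#define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
#define SQ_VTXC_STRIDE(x)       ((x) << 8)
#define SQ_VTXC_ENDIAN_SWAP(x)  ((x) << 30)
#define SQ_ENDIAN_8IN32         2u

int main(void)
{
        uint32_t named = SQ_VTXC_BASE_ADDR_HI(0x12) |
                         SQ_VTXC_STRIDE(16) |
                         SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
        uint32_t magic = (0x12 << 0) | (16 << 8) | (2u << 30);  /* old style */

        printf("named=0x%08x magic=0x%08x equal=%d\n",
               (unsigned)named, (unsigned)magic, named == magic);
        return 0;
}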
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index c1e056b35b29..e3170c794c1d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -102,7 +102,7 @@ extern int radeon_pcie_gen2; | |||
102 | #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) | 102 | #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) |
103 | /* RADEON_IB_POOL_SIZE must be a power of 2 */ | 103 | /* RADEON_IB_POOL_SIZE must be a power of 2 */ |
104 | #define RADEON_IB_POOL_SIZE 16 | 104 | #define RADEON_IB_POOL_SIZE 16 |
105 | #define RADEON_DEBUGFS_MAX_NUM_FILES 32 | 105 | #define RADEON_DEBUGFS_MAX_COMPONENTS 32 |
106 | #define RADEONFB_CONN_LIMIT 4 | 106 | #define RADEONFB_CONN_LIMIT 4 |
107 | #define RADEON_BIOS_NUM_SCRATCH 8 | 107 | #define RADEON_BIOS_NUM_SCRATCH 8 |
108 | 108 | ||
@@ -523,9 +523,30 @@ struct r600_ih { | |||
523 | bool enabled; | 523 | bool enabled; |
524 | }; | 524 | }; |
525 | 525 | ||
526 | struct r600_blit_cp_primitives { | ||
527 | void (*set_render_target)(struct radeon_device *rdev, int format, | ||
528 | int w, int h, u64 gpu_addr); | ||
529 | void (*cp_set_surface_sync)(struct radeon_device *rdev, | ||
530 | u32 sync_type, u32 size, | ||
531 | u64 mc_addr); | ||
532 | void (*set_shaders)(struct radeon_device *rdev); | ||
533 | void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); | ||
534 | void (*set_tex_resource)(struct radeon_device *rdev, | ||
535 | int format, int w, int h, int pitch, | ||
536 | u64 gpu_addr); | ||
537 | void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, | ||
538 | int x2, int y2); | ||
539 | void (*draw_auto)(struct radeon_device *rdev); | ||
540 | void (*set_default_state)(struct radeon_device *rdev); | ||
541 | }; | ||
542 | |||
526 | struct r600_blit { | 543 | struct r600_blit { |
527 | struct mutex mutex; | 544 | struct mutex mutex; |
528 | struct radeon_bo *shader_obj; | 545 | struct radeon_bo *shader_obj; |
546 | struct r600_blit_cp_primitives primitives; | ||
547 | int max_dim; | ||
548 | int ring_size_common; | ||
549 | int ring_size_per_loop; | ||
529 | u64 shader_gpu_addr; | 550 | u64 shader_gpu_addr; |
530 | u32 vs_offset, ps_offset; | 551 | u32 vs_offset, ps_offset; |
531 | u32 state_offset; | 552 | u32 state_offset; |
@@ -534,6 +555,8 @@ struct r600_blit { | |||
534 | struct radeon_ib *vb_ib; | 555 | struct radeon_ib *vb_ib; |
535 | }; | 556 | }; |
536 | 557 | ||
558 | void r600_blit_suspend(struct radeon_device *rdev); | ||
559 | |||
537 | int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); | 560 | int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); |
538 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); | 561 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); |
539 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); | 562 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); |
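The primitives table is a classic C vtable: the shared copy loop calls through function pointers and each ASIC family installs its own emitters at init time, which is what lets evergreen's copy paths collapse into r600_copy_blit() in radeon_asic.c below. A minimal sketch of the idiom:

#include <stdio.h>

struct dev;

struct blit_primitives {
        void (*set_scissors)(struct dev *d, int x1, int y1, int x2, int y2);
        void (*draw_auto)(struct dev *d);
};

struct dev {
        const char *name;
        struct blit_primitives prim;
};

static void r600_set_scissors(struct dev *d, int x1, int y1, int x2, int y2)
{
        printf("%s: scissors (%d,%d)-(%d,%d)\n", d->name, x1, y1, x2, y2);
}

static void r600_draw_auto(struct dev *d)
{
        printf("%s: draw\n", d->name);
}

/* Generic loop: knows nothing about which ASIC it drives. */
static void blit_copy(struct dev *d, int w, int h)
{
        d->prim.set_scissors(d, 0, 0, w, h);
        d->prim.draw_auto(d);
}

int main(void)
{
        struct dev d = { "r600", { r600_set_scissors, r600_draw_auto } };
        blit_copy(&d, 8192, 32);
        return 0;
}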
@@ -601,32 +624,7 @@ struct radeon_cs_parser { | |||
601 | 624 | ||
602 | extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); | 625 | extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); |
603 | extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); | 626 | extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); |
604 | 627 | extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx); | |
605 | |||
606 | static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) | ||
607 | { | ||
608 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
609 | u32 pg_idx, pg_offset; | ||
610 | u32 idx_value = 0; | ||
611 | int new_page; | ||
612 | |||
613 | pg_idx = (idx * 4) / PAGE_SIZE; | ||
614 | pg_offset = (idx * 4) % PAGE_SIZE; | ||
615 | |||
616 | if (ibc->kpage_idx[0] == pg_idx) | ||
617 | return ibc->kpage[0][pg_offset/4]; | ||
618 | if (ibc->kpage_idx[1] == pg_idx) | ||
619 | return ibc->kpage[1][pg_offset/4]; | ||
620 | |||
621 | new_page = radeon_cs_update_pages(p, pg_idx); | ||
622 | if (new_page < 0) { | ||
623 | p->parser_error = new_page; | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | idx_value = ibc->kpage[new_page][pg_offset/4]; | ||
628 | return idx_value; | ||
629 | } | ||
630 | 628 | ||
631 | struct radeon_cs_packet { | 629 | struct radeon_cs_packet { |
632 | unsigned idx; | 630 | unsigned idx; |
@@ -869,7 +867,7 @@ struct radeon_pm { | |||
869 | /* | 867 | /* |
870 | * Benchmarking | 868 | * Benchmarking |
871 | */ | 869 | */ |
872 | void radeon_benchmark(struct radeon_device *rdev); | 870 | void radeon_benchmark(struct radeon_device *rdev, int test_number); |
873 | 871 | ||
874 | 872 | ||
875 | /* | 873 | /* |
@@ -1252,45 +1250,10 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1252 | void radeon_device_fini(struct radeon_device *rdev); | 1250 | void radeon_device_fini(struct radeon_device *rdev); |
1253 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); | 1251 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
1254 | 1252 | ||
1255 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 1253 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
1256 | { | 1254 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
1257 | if (reg < rdev->rmmio_size) | 1255 | u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); |
1258 | return readl((rdev->rmmio) + reg); | 1256 | void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
1259 | else { | ||
1260 | writel(reg, (rdev->rmmio) + RADEON_MM_INDEX); | ||
1261 | return readl((rdev->rmmio) + RADEON_MM_DATA); | ||
1262 | } | ||
1263 | } | ||
1264 | |||
1265 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | ||
1266 | { | ||
1267 | if (reg < rdev->rmmio_size) | ||
1268 | writel(v, (rdev->rmmio) + reg); | ||
1269 | else { | ||
1270 | writel(reg, (rdev->rmmio) + RADEON_MM_INDEX); | ||
1271 | writel(v, (rdev->rmmio) + RADEON_MM_DATA); | ||
1272 | } | ||
1273 | } | ||
1274 | |||
1275 | static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) | ||
1276 | { | ||
1277 | if (reg < rdev->rio_mem_size) | ||
1278 | return ioread32(rdev->rio_mem + reg); | ||
1279 | else { | ||
1280 | iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); | ||
1281 | return ioread32(rdev->rio_mem + RADEON_MM_DATA); | ||
1282 | } | ||
1283 | } | ||
1284 | |||
1285 | static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) | ||
1286 | { | ||
1287 | if (reg < rdev->rio_mem_size) | ||
1288 | iowrite32(v, rdev->rio_mem + reg); | ||
1289 | else { | ||
1290 | iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); | ||
1291 | iowrite32(v, rdev->rio_mem + RADEON_MM_DATA); | ||
1292 | } | ||
1293 | } | ||
1294 | 1257 | ||
1295 | /* | 1258 | /* |
1296 | * Cast helper | 1259 | * Cast helper |
@@ -1413,19 +1376,19 @@ void radeon_atombios_fini(struct radeon_device *rdev); | |||
1413 | /* | 1376 | /* |
1414 | * RING helpers. | 1377 | * RING helpers. |
1415 | */ | 1378 | */ |
1379 | |||
1380 | #if DRM_DEBUG_CODE == 0 | ||
1416 | static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | 1381 | static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) |
1417 | { | 1382 | { |
1418 | #if DRM_DEBUG_CODE | ||
1419 | if (rdev->cp.count_dw <= 0) { | ||
1420 | DRM_ERROR("radeon: writting more dword to ring than expected !\n"); | ||
1421 | } | ||
1422 | #endif | ||
1423 | rdev->cp.ring[rdev->cp.wptr++] = v; | 1383 | rdev->cp.ring[rdev->cp.wptr++] = v; |
1424 | rdev->cp.wptr &= rdev->cp.ptr_mask; | 1384 | rdev->cp.wptr &= rdev->cp.ptr_mask; |
1425 | rdev->cp.count_dw--; | 1385 | rdev->cp.count_dw--; |
1426 | rdev->cp.ring_free_dw--; | 1386 | rdev->cp.ring_free_dw--; |
1427 | } | 1387 | } |
1428 | 1388 | #else | |
1389 | /* With debugging this is just too big to inline */ | ||
1390 | void radeon_ring_write(struct radeon_device *rdev, uint32_t v); | ||
1391 | #endif | ||
1429 | 1392 | ||
1430 | /* | 1393 | /* |
1431 | * ASICs macro. | 1394 | * ASICs macro. |
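Splitting radeon_ring_write() on DRM_DEBUG_CODE keeps the four-line fast path inline in production builds, while the checked variant, too big to duplicate at every call site, becomes a normal out-of-line function in debug builds. The general pattern:

#include <stdio.h>

#define DEBUG_CODE 0   /* flip to 1 for the checked, out-of-line build */

struct ring { unsigned wptr, mask, count; unsigned buf[16]; };

#if DEBUG_CODE == 0
/* Hot path: tiny enough that inlining pays off. */
static inline void ring_write(struct ring *r, unsigned v)
{
        r->buf[r->wptr++ & r->mask] = v;
        r->count--;
}
#else
/* Debug build: with the checks this is too big to inline everywhere. */
static void ring_write(struct ring *r, unsigned v)
{
        if (r->count == 0)
                fprintf(stderr, "writing more dwords than reserved!\n");
        r->buf[r->wptr++ & r->mask] = v;
        r->count--;
}
#endif

int main(void)
{
        struct ring r = { .mask = 15, .count = 4 };
        ring_write(&r, 0xdeadbeef);
        printf("wptr=%u count=%u\n", r.wptr, r.count);
        return 0;
}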
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index df8218bb83a6..e2944566ffea 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -765,9 +765,9 @@ static struct radeon_asic evergreen_asic = { | |||
765 | .get_vblank_counter = &evergreen_get_vblank_counter, | 765 | .get_vblank_counter = &evergreen_get_vblank_counter, |
766 | .fence_ring_emit = &r600_fence_ring_emit, | 766 | .fence_ring_emit = &r600_fence_ring_emit, |
767 | .cs_parse = &evergreen_cs_parse, | 767 | .cs_parse = &evergreen_cs_parse, |
768 | .copy_blit = &evergreen_copy_blit, | 768 | .copy_blit = &r600_copy_blit, |
769 | .copy_dma = NULL, | 769 | .copy_dma = NULL, |
770 | .copy = &evergreen_copy_blit, | 770 | .copy = &r600_copy_blit, |
771 | .get_engine_clock = &radeon_atom_get_engine_clock, | 771 | .get_engine_clock = &radeon_atom_get_engine_clock, |
772 | .set_engine_clock = &radeon_atom_set_engine_clock, | 772 | .set_engine_clock = &radeon_atom_set_engine_clock, |
773 | .get_memory_clock = &radeon_atom_get_memory_clock, | 773 | .get_memory_clock = &radeon_atom_get_memory_clock, |
@@ -812,9 +812,9 @@ static struct radeon_asic sumo_asic = { | |||
812 | .get_vblank_counter = &evergreen_get_vblank_counter, | 812 | .get_vblank_counter = &evergreen_get_vblank_counter, |
813 | .fence_ring_emit = &r600_fence_ring_emit, | 813 | .fence_ring_emit = &r600_fence_ring_emit, |
814 | .cs_parse = &evergreen_cs_parse, | 814 | .cs_parse = &evergreen_cs_parse, |
815 | .copy_blit = &evergreen_copy_blit, | 815 | .copy_blit = &r600_copy_blit, |
816 | .copy_dma = NULL, | 816 | .copy_dma = NULL, |
817 | .copy = &evergreen_copy_blit, | 817 | .copy = &r600_copy_blit, |
818 | .get_engine_clock = &radeon_atom_get_engine_clock, | 818 | .get_engine_clock = &radeon_atom_get_engine_clock, |
819 | .set_engine_clock = &radeon_atom_set_engine_clock, | 819 | .set_engine_clock = &radeon_atom_set_engine_clock, |
820 | .get_memory_clock = NULL, | 820 | .get_memory_clock = NULL, |
@@ -859,9 +859,9 @@ static struct radeon_asic btc_asic = { | |||
859 | .get_vblank_counter = &evergreen_get_vblank_counter, | 859 | .get_vblank_counter = &evergreen_get_vblank_counter, |
860 | .fence_ring_emit = &r600_fence_ring_emit, | 860 | .fence_ring_emit = &r600_fence_ring_emit, |
861 | .cs_parse = &evergreen_cs_parse, | 861 | .cs_parse = &evergreen_cs_parse, |
862 | .copy_blit = &evergreen_copy_blit, | 862 | .copy_blit = &r600_copy_blit, |
863 | .copy_dma = NULL, | 863 | .copy_dma = NULL, |
864 | .copy = &evergreen_copy_blit, | 864 | .copy = &r600_copy_blit, |
865 | .get_engine_clock = &radeon_atom_get_engine_clock, | 865 | .get_engine_clock = &radeon_atom_get_engine_clock, |
866 | .set_engine_clock = &radeon_atom_set_engine_clock, | 866 | .set_engine_clock = &radeon_atom_set_engine_clock, |
867 | .get_memory_clock = &radeon_atom_get_memory_clock, | 867 | .get_memory_clock = &radeon_atom_get_memory_clock, |
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = { | |||
906 | .get_vblank_counter = &evergreen_get_vblank_counter, | 906 | .get_vblank_counter = &evergreen_get_vblank_counter, |
907 | .fence_ring_emit = &r600_fence_ring_emit, | 907 | .fence_ring_emit = &r600_fence_ring_emit, |
908 | .cs_parse = &evergreen_cs_parse, | 908 | .cs_parse = &evergreen_cs_parse, |
909 | .copy_blit = &evergreen_copy_blit, | 909 | .copy_blit = &r600_copy_blit, |
910 | .copy_dma = NULL, | 910 | .copy_dma = NULL, |
911 | .copy = &evergreen_copy_blit, | 911 | .copy = &r600_copy_blit, |
912 | .get_engine_clock = &radeon_atom_get_engine_clock, | 912 | .get_engine_clock = &radeon_atom_get_engine_clock, |
913 | .set_engine_clock = &radeon_atom_set_engine_clock, | 913 | .set_engine_clock = &radeon_atom_set_engine_clock, |
914 | .get_memory_clock = &radeon_atom_get_memory_clock, | 914 | .get_memory_clock = &radeon_atom_get_memory_clock, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3dedaa07aac1..85f14f0337e4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -364,11 +364,11 @@ void r600_hdmi_init(struct drm_encoder *encoder); | |||
364 | int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | 364 | int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); |
365 | void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); | 365 | void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); |
366 | /* r600 blit */ | 366 | /* r600 blit */ |
367 | int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | 367 | int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages); |
368 | void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | 368 | void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); |
369 | void r600_kms_blit_copy(struct radeon_device *rdev, | 369 | void r600_kms_blit_copy(struct radeon_device *rdev, |
370 | u64 src_gpu_addr, u64 dst_gpu_addr, | 370 | u64 src_gpu_addr, u64 dst_gpu_addr, |
371 | int size_bytes); | 371 | unsigned num_gpu_pages); |
372 | 372 | ||
373 | /* | 373 | /* |
374 | * rv770,rv730,rv710,rv740 | 374 | * rv770,rv730,rv710,rv740 |
@@ -401,9 +401,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev); | |||
401 | int evergreen_asic_reset(struct radeon_device *rdev); | 401 | int evergreen_asic_reset(struct radeon_device *rdev); |
402 | void evergreen_bandwidth_update(struct radeon_device *rdev); | 402 | void evergreen_bandwidth_update(struct radeon_device *rdev); |
403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
404 | int evergreen_copy_blit(struct radeon_device *rdev, | ||
405 | uint64_t src_offset, uint64_t dst_offset, | ||
406 | unsigned num_gpu_pages, struct radeon_fence *fence); | ||
407 | void evergreen_hpd_init(struct radeon_device *rdev); | 404 | void evergreen_hpd_init(struct radeon_device *rdev); |
408 | void evergreen_hpd_fini(struct radeon_device *rdev); | 405 | void evergreen_hpd_fini(struct radeon_device *rdev); |
409 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 406 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
@@ -421,13 +418,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba | |||
421 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | 418 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); |
422 | void evergreen_disable_interrupt_state(struct radeon_device *rdev); | 419 | void evergreen_disable_interrupt_state(struct radeon_device *rdev); |
423 | int evergreen_blit_init(struct radeon_device *rdev); | 420 | int evergreen_blit_init(struct radeon_device *rdev); |
424 | void evergreen_blit_fini(struct radeon_device *rdev); | ||
425 | /* evergreen blit */ | ||
426 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
427 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
428 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
429 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
430 | int size_bytes); | ||
431 | 421 | ||
432 | /* | 422 | /* |
433 | * cayman | 423 | * cayman |
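The prototypes above switch the r600 blit copy path from a byte count to a count of GPU pages. A minimal sketch of the conversion a caller would perform, assuming the driver's usual 4 KiB GPU page size (the helper name is hypothetical):

    #define RADEON_GPU_PAGE_SIZE 4096

    /* Round a byte count up to whole GPU pages (hypothetical helper). */
    static unsigned radeon_bytes_to_gpu_pages(unsigned long size_bytes)
    {
            return (size_bytes + RADEON_GPU_PAGE_SIZE - 1) / RADEON_GPU_PAGE_SIZE;
    }

    /* e.g. a 1 MiB copy becomes 256 pages:
     * r600_blit_prepare_copy(rdev, radeon_bytes_to_gpu_pages(1024 * 1024));
     */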
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index bf2b61584cdb..08d0b94332e6 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -62,7 +62,7 @@ union atom_supported_devices { | |||
62 | struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; | 62 | struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, | 65 | static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, |
66 | uint8_t id) | 66 | uint8_t id) |
67 | { | 67 | { |
68 | struct atom_context *ctx = rdev->mode_info.atom_context; | 68 | struct atom_context *ctx = rdev->mode_info.atom_context; |
@@ -228,7 +228,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
228 | } | 228 | } |
229 | } | 229 | } |
230 | 230 | ||
231 | static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, | 231 | static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, |
232 | u8 id) | 232 | u8 id) |
233 | { | 233 | { |
234 | struct atom_context *ctx = rdev->mode_info.atom_context; | 234 | struct atom_context *ctx = rdev->mode_info.atom_context; |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 10191d9372d8..5cafc90de7f8 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -26,21 +26,81 @@ | |||
26 | #include "radeon_reg.h" | 26 | #include "radeon_reg.h" |
27 | #include "radeon.h" | 27 | #include "radeon.h" |
28 | 28 | ||
29 | void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | 29 | #define RADEON_BENCHMARK_COPY_BLIT 1 |
30 | unsigned sdomain, unsigned ddomain) | 30 | #define RADEON_BENCHMARK_COPY_DMA 0 |
31 | |||
32 | #define RADEON_BENCHMARK_ITERATIONS 1024 | ||
33 | #define RADEON_BENCHMARK_COMMON_MODES_N 17 | ||
34 | |||
35 | static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, | ||
36 | uint64_t saddr, uint64_t daddr, | ||
37 | int flag, int n) | ||
38 | { | ||
39 | unsigned long start_jiffies; | ||
40 | unsigned long end_jiffies; | ||
41 | struct radeon_fence *fence = NULL; | ||
42 | int i, r; | ||
43 | |||
44 | start_jiffies = jiffies; | ||
45 | for (i = 0; i < n; i++) { | ||
46 | r = radeon_fence_create(rdev, &fence); | ||
47 | if (r) | ||
48 | return r; | ||
49 | |||
50 | switch (flag) { | ||
51 | case RADEON_BENCHMARK_COPY_DMA: | ||
52 | r = radeon_copy_dma(rdev, saddr, daddr, | ||
53 | size / RADEON_GPU_PAGE_SIZE, | ||
54 | fence); | ||
55 | break; | ||
56 | case RADEON_BENCHMARK_COPY_BLIT: | ||
57 | r = radeon_copy_blit(rdev, saddr, daddr, | ||
58 | size / RADEON_GPU_PAGE_SIZE, | ||
59 | fence); | ||
60 | break; | ||
61 | default: | ||
62 | DRM_ERROR("Unknown copy method\n"); | ||
63 | r = -EINVAL; | ||
64 | } | ||
65 | if (r) | ||
66 | goto exit_do_move; | ||
67 | r = radeon_fence_wait(fence, false); | ||
68 | if (r) | ||
69 | goto exit_do_move; | ||
70 | radeon_fence_unref(&fence); | ||
71 | } | ||
72 | end_jiffies = jiffies; | ||
73 | r = jiffies_to_msecs(end_jiffies - start_jiffies); | ||
74 | |||
75 | exit_do_move: | ||
76 | if (fence) | ||
77 | radeon_fence_unref(&fence); | ||
78 | return r; | ||
79 | } | ||
80 | |||
81 | |||
82 | static void radeon_benchmark_log_results(int n, unsigned size, | ||
83 | unsigned int time, | ||
84 | unsigned sdomain, unsigned ddomain, | ||
85 | char *kind) | ||
86 | { | ||
87 | unsigned int throughput = (n * (size >> 10)) / time; | ||
88 | DRM_INFO("radeon: %s %u bo moves of %u kB from" | ||
89 | " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n", | ||
90 | kind, n, size >> 10, sdomain, ddomain, time, | ||
91 | throughput * 8, throughput); | ||
92 | } | ||
93 | |||
94 | static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | ||
95 | unsigned sdomain, unsigned ddomain) | ||
31 | { | 96 | { |
32 | struct radeon_bo *dobj = NULL; | 97 | struct radeon_bo *dobj = NULL; |
33 | struct radeon_bo *sobj = NULL; | 98 | struct radeon_bo *sobj = NULL; |
34 | struct radeon_fence *fence = NULL; | ||
35 | uint64_t saddr, daddr; | 99 | uint64_t saddr, daddr; |
36 | unsigned long start_jiffies; | 100 | int r, n; |
37 | unsigned long end_jiffies; | 101 | int time; |
38 | unsigned long time; | ||
39 | unsigned i, n, size; | ||
40 | int r; | ||
41 | 102 | ||
42 | size = bsize; | 103 | n = RADEON_BENCHMARK_ITERATIONS; |
43 | n = 1024; | ||
44 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); | 104 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); |
45 | if (r) { | 105 | if (r) { |
46 | goto out_cleanup; | 106 | goto out_cleanup; |
@@ -67,65 +127,26 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
67 | } | 127 | } |
68 | 128 | ||
69 | /* r100 doesn't have dma engine so skip the test */ | 129 | /* r100 doesn't have dma engine so skip the test */ |
70 | if (rdev->asic->copy_dma) { | 130 | /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ |
71 | 131 | /* skip it as well if domains are the same */ | |
72 | start_jiffies = jiffies; | 132 | if ((rdev->asic->copy_dma) && (sdomain != ddomain)) { |
73 | for (i = 0; i < n; i++) { | 133 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
74 | r = radeon_fence_create(rdev, &fence); | 134 | RADEON_BENCHMARK_COPY_DMA, n); |
75 | if (r) { | 135 | if (time < 0) |
76 | goto out_cleanup; | ||
77 | } | ||
78 | |||
79 | r = radeon_copy_dma(rdev, saddr, daddr, | ||
80 | size / RADEON_GPU_PAGE_SIZE, fence); | ||
81 | |||
82 | if (r) { | ||
83 | goto out_cleanup; | ||
84 | } | ||
85 | r = radeon_fence_wait(fence, false); | ||
86 | if (r) { | ||
87 | goto out_cleanup; | ||
88 | } | ||
89 | radeon_fence_unref(&fence); | ||
90 | } | ||
91 | end_jiffies = jiffies; | ||
92 | time = end_jiffies - start_jiffies; | ||
93 | time = jiffies_to_msecs(time); | ||
94 | if (time > 0) { | ||
95 | i = ((n * size) >> 10) / time; | ||
96 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from" | ||
97 | " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n", | ||
98 | n, size >> 10, | ||
99 | sdomain, ddomain, time, | ||
100 | i, i * 1000, (i * 1000) / 1024); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | start_jiffies = jiffies; | ||
105 | for (i = 0; i < n; i++) { | ||
106 | r = radeon_fence_create(rdev, &fence); | ||
107 | if (r) { | ||
108 | goto out_cleanup; | ||
109 | } | ||
110 | r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); | ||
111 | if (r) { | ||
112 | goto out_cleanup; | ||
113 | } | ||
114 | r = radeon_fence_wait(fence, false); | ||
115 | if (r) { | ||
116 | goto out_cleanup; | 136 | goto out_cleanup; |
117 | } | 137 | if (time > 0) |
118 | radeon_fence_unref(&fence); | 138 | radeon_benchmark_log_results(n, size, time, |
119 | } | 139 | sdomain, ddomain, "dma"); |
120 | end_jiffies = jiffies; | ||
121 | time = end_jiffies - start_jiffies; | ||
122 | time = jiffies_to_msecs(time); | ||
123 | if (time > 0) { | ||
124 | i = ((n * size) >> 10) / time; | ||
125 | printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d" | ||
126 | " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10, | ||
127 | sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024); | ||
128 | } | 140 | } |
141 | |||
142 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | ||
143 | RADEON_BENCHMARK_COPY_BLIT, n); | ||
144 | if (time < 0) | ||
145 | goto out_cleanup; | ||
146 | if (time > 0) | ||
147 | radeon_benchmark_log_results(n, size, time, | ||
148 | sdomain, ddomain, "blit"); | ||
149 | |||
129 | out_cleanup: | 150 | out_cleanup: |
130 | if (sobj) { | 151 | if (sobj) { |
131 | r = radeon_bo_reserve(sobj, false); | 152 | r = radeon_bo_reserve(sobj, false); |
@@ -143,18 +164,92 @@ out_cleanup: | |||
143 | } | 164 | } |
144 | radeon_bo_unref(&dobj); | 165 | radeon_bo_unref(&dobj); |
145 | } | 166 | } |
146 | if (fence) { | 167 | |
147 | radeon_fence_unref(&fence); | ||
148 | } | ||
149 | if (r) { | 168 | if (r) { |
150 | printk(KERN_WARNING "Error while benchmarking BO move.\n"); | 169 | DRM_ERROR("Error while benchmarking BO move.\n"); |
151 | } | 170 | } |
152 | } | 171 | } |
153 | 172 | ||
154 | void radeon_benchmark(struct radeon_device *rdev) | 173 | void radeon_benchmark(struct radeon_device *rdev, int test_number) |
155 | { | 174 | { |
156 | radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT, | 175 | int i; |
157 | RADEON_GEM_DOMAIN_VRAM); | 176 | int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = { |
158 | radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM, | 177 | 640 * 480 * 4, |
159 | RADEON_GEM_DOMAIN_GTT); | 178 | 720 * 480 * 4, |
179 | 800 * 600 * 4, | ||
180 | 848 * 480 * 4, | ||
181 | 1024 * 768 * 4, | ||
182 | 1152 * 768 * 4, | ||
183 | 1280 * 720 * 4, | ||
184 | 1280 * 800 * 4, | ||
185 | 1280 * 854 * 4, | ||
186 | 1280 * 960 * 4, | ||
187 | 1280 * 1024 * 4, | ||
188 | 1440 * 900 * 4, | ||
189 | 1400 * 1050 * 4, | ||
190 | 1680 * 1050 * 4, | ||
191 | 1600 * 1200 * 4, | ||
192 | 1920 * 1080 * 4, | ||
193 | 1920 * 1200 * 4 | ||
194 | }; | ||
195 | |||
196 | switch (test_number) { | ||
197 | case 1: | ||
198 | /* simple test, VRAM to GTT and GTT to VRAM */ | ||
199 | radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT, | ||
200 | RADEON_GEM_DOMAIN_VRAM); | ||
201 | radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM, | ||
202 | RADEON_GEM_DOMAIN_GTT); | ||
203 | break; | ||
204 | case 2: | ||
205 | /* simple test, VRAM to VRAM */ | ||
206 | radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM, | ||
207 | RADEON_GEM_DOMAIN_VRAM); | ||
208 | break; | ||
209 | case 3: | ||
210 | /* GTT to VRAM, buffer size sweep, powers of 2 */ | ||
211 | for (i = 1; i <= 65536; i <<= 1) | ||
212 | radeon_benchmark_move(rdev, i*1024, | ||
213 | RADEON_GEM_DOMAIN_GTT, | ||
214 | RADEON_GEM_DOMAIN_VRAM); | ||
215 | break; | ||
216 | case 4: | ||
217 | /* VRAM to GTT, buffer size sweep, powers of 2 */ | ||
218 | for (i = 1; i <= 65536; i <<= 1) | ||
219 | radeon_benchmark_move(rdev, i*1024, | ||
220 | RADEON_GEM_DOMAIN_VRAM, | ||
221 | RADEON_GEM_DOMAIN_GTT); | ||
222 | break; | ||
223 | case 5: | ||
224 | /* VRAM to VRAM, buffer size sweep, powers of 2 */ | ||
225 | for (i = 1; i <= 65536; i <<= 1) | ||
226 | radeon_benchmark_move(rdev, i*1024, | ||
227 | RADEON_GEM_DOMAIN_VRAM, | ||
228 | RADEON_GEM_DOMAIN_VRAM); | ||
229 | break; | ||
230 | case 6: | ||
231 | /* GTT to VRAM, buffer size sweep, common modes */ | ||
232 | for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++) | ||
233 | radeon_benchmark_move(rdev, common_modes[i], | ||
234 | RADEON_GEM_DOMAIN_GTT, | ||
235 | RADEON_GEM_DOMAIN_VRAM); | ||
236 | break; | ||
237 | case 7: | ||
238 | /* VRAM to GTT, buffer size sweep, common modes */ | ||
239 | for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++) | ||
240 | radeon_benchmark_move(rdev, common_modes[i], | ||
241 | RADEON_GEM_DOMAIN_VRAM, | ||
242 | RADEON_GEM_DOMAIN_GTT); | ||
243 | break; | ||
244 | case 8: | ||
245 | /* VRAM to VRAM, buffer size sweep, common modes */ | ||
246 | for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++) | ||
247 | radeon_benchmark_move(rdev, common_modes[i], | ||
248 | RADEON_GEM_DOMAIN_VRAM, | ||
249 | RADEON_GEM_DOMAIN_VRAM); | ||
250 | break; | ||
251 | |||
252 | default: | ||
253 | DRM_ERROR("Unknown benchmark\n"); | ||
254 | } | ||
160 | } | 255 | } |
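The refactored benchmark times n copies in jiffies, converts the delta to milliseconds, and reports kilobytes per millisecond, which is numerically MB/s; the factor of 8 in radeon_benchmark_log_results() turns that into Mb/s. Note that the elapsed time must be held in a signed int so a negative error return from radeon_benchmark_do_move() remains detectable. The same arithmetic as a standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned n = 1024;           /* RADEON_BENCHMARK_ITERATIONS */
            unsigned size = 1024 * 1024; /* 1 MiB per bo move */
            unsigned time_ms = 512;      /* hypothetical elapsed time */

            /* (moves * KiB per move) / ms == KiB/ms == MiB/s */
            unsigned throughput = (n * (size >> 10)) / time_ms;

            printf("%u MB/s or %u Mb/s\n", throughput, throughput * 8);
            return 0;   /* prints: 2048 MB/s or 16384 Mb/s */
    }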
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 63675241c7ff..8bf83c4b4147 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -620,8 +620,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
620 | i2c.y_data_mask = 0x80; | 620 | i2c.y_data_mask = 0x80; |
621 | } else { | 621 | } else { |
622 | /* default masks for ddc pads */ | 622 | /* default masks for ddc pads */ |
623 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | 623 | i2c.mask_clk_mask = RADEON_GPIO_MASK_1; |
624 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 624 | i2c.mask_data_mask = RADEON_GPIO_MASK_0; |
625 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 625 | i2c.a_clk_mask = RADEON_GPIO_A_1; |
626 | i2c.a_data_mask = RADEON_GPIO_A_0; | 626 | i2c.a_data_mask = RADEON_GPIO_A_0; |
627 | i2c.en_clk_mask = RADEON_GPIO_EN_1; | 627 | i2c.en_clk_mask = RADEON_GPIO_EN_1; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 449c3d8c6836..dec6cbe6a0a6 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -724,6 +724,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
724 | dret = radeon_ddc_probe(radeon_connector, | 724 | dret = radeon_ddc_probe(radeon_connector, |
725 | radeon_connector->requires_extended_probe); | 725 | radeon_connector->requires_extended_probe); |
726 | if (dret) { | 726 | if (dret) { |
727 | radeon_connector->detected_by_load = false; | ||
727 | if (radeon_connector->edid) { | 728 | if (radeon_connector->edid) { |
728 | kfree(radeon_connector->edid); | 729 | kfree(radeon_connector->edid); |
729 | radeon_connector->edid = NULL; | 730 | radeon_connector->edid = NULL; |
@@ -750,12 +751,21 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
750 | } else { | 751 | } else { |
751 | 752 | ||
752 | /* if we aren't forcing don't do destructive polling */ | 753 | /* if we aren't forcing don't do destructive polling */ |
753 | if (!force) | 754 | if (!force) { |
754 | return connector->status; | 755 | /* only return the previous status if we last |
756 | * detected a monitor via load. | ||
757 | */ | ||
758 | if (radeon_connector->detected_by_load) | ||
759 | return connector->status; | ||
760 | else | ||
761 | return ret; | ||
762 | } | ||
755 | 763 | ||
756 | if (radeon_connector->dac_load_detect && encoder) { | 764 | if (radeon_connector->dac_load_detect && encoder) { |
757 | encoder_funcs = encoder->helper_private; | 765 | encoder_funcs = encoder->helper_private; |
758 | ret = encoder_funcs->detect(encoder, connector); | 766 | ret = encoder_funcs->detect(encoder, connector); |
767 | if (ret == connector_status_connected) | ||
768 | radeon_connector->detected_by_load = true; | ||
759 | } | 769 | } |
760 | } | 770 | } |
761 | 771 | ||
@@ -897,6 +907,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
897 | dret = radeon_ddc_probe(radeon_connector, | 907 | dret = radeon_ddc_probe(radeon_connector, |
898 | radeon_connector->requires_extended_probe); | 908 | radeon_connector->requires_extended_probe); |
899 | if (dret) { | 909 | if (dret) { |
910 | radeon_connector->detected_by_load = false; | ||
900 | if (radeon_connector->edid) { | 911 | if (radeon_connector->edid) { |
901 | kfree(radeon_connector->edid); | 912 | kfree(radeon_connector->edid); |
902 | radeon_connector->edid = NULL; | 913 | radeon_connector->edid = NULL; |
@@ -959,8 +970,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
959 | if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) | 970 | if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) |
960 | goto out; | 971 | goto out; |
961 | 972 | ||
973 | /* DVI-D and HDMI-A are digital only */ | ||
974 | if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) || | ||
975 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA)) | ||
976 | goto out; | ||
977 | |||
978 | /* if we aren't forcing don't do destructive polling */ | ||
962 | if (!force) { | 979 | if (!force) { |
963 | ret = connector->status; | 980 | /* only return the previous status if we last |
981 | * detected a monitor via load. | ||
982 | */ | ||
983 | if (radeon_connector->detected_by_load) | ||
984 | ret = connector->status; | ||
964 | goto out; | 985 | goto out; |
965 | } | 986 | } |
966 | 987 | ||
@@ -984,6 +1005,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
984 | ret = encoder_funcs->detect(encoder, connector); | 1005 | ret = encoder_funcs->detect(encoder, connector); |
985 | if (ret == connector_status_connected) { | 1006 | if (ret == connector_status_connected) { |
986 | radeon_connector->use_digital = false; | 1007 | radeon_connector->use_digital = false; |
1008 | radeon_connector->detected_by_load = true; | ||
987 | } | 1009 | } |
988 | } | 1010 | } |
989 | break; | 1011 | break; |
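The new detected_by_load flag gates non-destructive polling: a cached "connected" status is only trusted when it was produced by destructive load detection, since a DDC-probed status is re-established by the probe itself. A condensed sketch of the decision, not the driver's verbatim control flow:

    enum status { DISCONNECTED, CONNECTED };

    /* !force: do not run destructive load detection again */
    static enum status poll_decision(int force, int detected_by_load,
                                     enum status cached_status,
                                     enum status probed_status)
    {
            if (!force)
                    return detected_by_load ? cached_status : probed_status;
            return probed_status; /* forced: redo load detection */
    }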
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b51e15725c6e..c33bc914d93d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -750,14 +750,15 @@ int radeon_device_init(struct radeon_device *rdev, | |||
750 | 750 | ||
751 | /* set DMA mask + need_dma32 flags. | 751 | /* set DMA mask + need_dma32 flags. |
752 | * PCIE - can handle 40-bits. | 752 | * PCIE - can handle 40-bits. |
753 | * IGP - can handle 40-bits (in theory) | 753 | * IGP - can handle 40-bits |
754 | * AGP - generally dma32 is safest | 754 | * AGP - generally dma32 is safest |
755 | * PCI - only dma32 | 755 | * PCI - dma32 for legacy pci gart, 40 bits on newer asics |
756 | */ | 756 | */ |
757 | rdev->need_dma32 = false; | 757 | rdev->need_dma32 = false; |
758 | if (rdev->flags & RADEON_IS_AGP) | 758 | if (rdev->flags & RADEON_IS_AGP) |
759 | rdev->need_dma32 = true; | 759 | rdev->need_dma32 = true; |
760 | if (rdev->flags & RADEON_IS_PCI) | 760 | if ((rdev->flags & RADEON_IS_PCI) && |
761 | (rdev->family < CHIP_RS400)) | ||
761 | rdev->need_dma32 = true; | 762 | rdev->need_dma32 = true; |
762 | 763 | ||
763 | dma_bits = rdev->need_dma32 ? 32 : 40; | 764 | dma_bits = rdev->need_dma32 ? 32 : 40; |
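With the change above, only AGP parts and pre-RS400 PCI parts are limited to a 32-bit DMA mask; everything else advertises 40 bits. A sketch of how the chosen width would be applied, assuming the standard PCI DMA API of this era:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Sketch: apply the width chosen by the need_dma32 heuristic. */
    static int radeon_apply_dma_mask(struct pci_dev *pdev, bool need_dma32)
    {
            int dma_bits = need_dma32 ? 32 : 40;

            return pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_bits));
    }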
@@ -817,7 +818,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
817 | radeon_test_moves(rdev); | 818 | radeon_test_moves(rdev); |
818 | } | 819 | } |
819 | if (radeon_benchmarking) { | 820 | if (radeon_benchmarking) { |
820 | radeon_benchmark(rdev); | 821 | radeon_benchmark(rdev, radeon_benchmarking); |
821 | } | 822 | } |
822 | return 0; | 823 | return 0; |
823 | } | 824 | } |
@@ -981,7 +982,7 @@ struct radeon_debugfs { | |||
981 | struct drm_info_list *files; | 982 | struct drm_info_list *files; |
982 | unsigned num_files; | 983 | unsigned num_files; |
983 | }; | 984 | }; |
984 | static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES]; | 985 | static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS]; |
985 | static unsigned _radeon_debugfs_count = 0; | 986 | static unsigned _radeon_debugfs_count = 0; |
986 | 987 | ||
987 | int radeon_debugfs_add_files(struct radeon_device *rdev, | 988 | int radeon_debugfs_add_files(struct radeon_device *rdev, |
@@ -996,14 +997,17 @@ int radeon_debugfs_add_files(struct radeon_device *rdev, | |||
996 | return 0; | 997 | return 0; |
997 | } | 998 | } |
998 | } | 999 | } |
999 | if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) { | 1000 | |
1000 | DRM_ERROR("Reached maximum number of debugfs files.\n"); | 1001 | i = _radeon_debugfs_count + 1; |
1001 | DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n"); | 1002 | if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { |
1003 | DRM_ERROR("Reached maximum number of debugfs components.\n"); | ||
1004 | DRM_ERROR("Report so we increase " | ||
1005 | "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); | ||
1002 | return -EINVAL; | 1006 | return -EINVAL; |
1003 | } | 1007 | } |
1004 | _radeon_debugfs[_radeon_debugfs_count].files = files; | 1008 | _radeon_debugfs[_radeon_debugfs_count].files = files; |
1005 | _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles; | 1009 | _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles; |
1006 | _radeon_debugfs_count++; | 1010 | _radeon_debugfs_count = i; |
1007 | #if defined(CONFIG_DEBUG_FS) | 1011 | #if defined(CONFIG_DEBUG_FS) |
1008 | drm_debugfs_create_files(files, nfiles, | 1012 | drm_debugfs_create_files(files, nfiles, |
1009 | rdev->ddev->control->debugfs_root, | 1013 | rdev->ddev->control->debugfs_root, |
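radeon_debugfs_add_files() now counts registered components, i.e. arrays of files, rather than individual files, and rejects the registration once RADEON_DEBUGFS_MAX_COMPONENTS slots are taken. A hypothetical caller registers one component per feature (all names here are illustrative):

    /* One debugfs component == one drm_info_list array. */
    static struct drm_info_list radeon_foo_list[] = {
            /* { "radeon_foo_info", radeon_foo_show, 0, NULL }, */
    };

    int radeon_foo_debugfs_init(struct radeon_device *rdev)
    {
            return radeon_debugfs_add_files(rdev, radeon_foo_list,
                                            ARRAY_SIZE(radeon_foo_list));
    }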
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 3475a09f946b..76ec0e9ed8ae 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -263,7 +263,7 @@ retry: | |||
263 | */ | 263 | */ |
264 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { | 264 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { |
265 | /* good news we believe it's a lockup */ | 265 | /* good news we believe it's a lockup */ |
266 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", | 266 | printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", |
267 | fence->seq, seq); | 267 | fence->seq, seq); |
268 | /* FIXME: what should we do? Marking everyone | 268 | /* FIXME: what should we do? Marking everyone |
269 | * as signaled for now | 269 | * as signaled for now |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a533f52fd163..fdc3a9a54bf8 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -142,7 +142,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
142 | u64 page_base; | 142 | u64 page_base; |
143 | 143 | ||
144 | if (!rdev->gart.ready) { | 144 | if (!rdev->gart.ready) { |
145 | WARN(1, "trying to unbind memory to unitialized GART !\n"); | 145 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); |
146 | return; | 146 | return; |
147 | } | 147 | } |
148 | t = offset / RADEON_GPU_PAGE_SIZE; | 148 | t = offset / RADEON_GPU_PAGE_SIZE; |
@@ -174,7 +174,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
174 | int i, j; | 174 | int i, j; |
175 | 175 | ||
176 | if (!rdev->gart.ready) { | 176 | if (!rdev->gart.ready) { |
177 | WARN(1, "trying to bind memory to unitialized GART !\n"); | 177 | WARN(1, "trying to bind memory to uninitialized GART !\n"); |
178 | return -EINVAL; | 178 | return -EINVAL; |
179 | } | 179 | } |
180 | t = offset / RADEON_GPU_PAGE_SIZE; | 180 | t = offset / RADEON_GPU_PAGE_SIZE; |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6c111c1fa3f9..02cb7da4124d 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -81,8 +81,9 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e | |||
81 | 81 | ||
82 | /* bit banging i2c */ | 82 | /* bit banging i2c */ |
83 | 83 | ||
84 | static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) | 84 | static int pre_xfer(struct i2c_adapter *i2c_adap) |
85 | { | 85 | { |
86 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
86 | struct radeon_device *rdev = i2c->dev->dev_private; | 87 | struct radeon_device *rdev = i2c->dev->dev_private; |
87 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 88 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
88 | uint32_t temp; | 89 | uint32_t temp; |
@@ -137,19 +138,30 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) | |||
137 | WREG32(rec->en_data_reg, temp); | 138 | WREG32(rec->en_data_reg, temp); |
138 | 139 | ||
139 | /* mask the gpio pins for software use */ | 140 | /* mask the gpio pins for software use */ |
140 | temp = RREG32(rec->mask_clk_reg); | 141 | temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask; |
141 | if (lock_state) | ||
142 | temp |= rec->mask_clk_mask; | ||
143 | else | ||
144 | temp &= ~rec->mask_clk_mask; | ||
145 | WREG32(rec->mask_clk_reg, temp); | 142 | WREG32(rec->mask_clk_reg, temp); |
146 | temp = RREG32(rec->mask_clk_reg); | 143 | temp = RREG32(rec->mask_clk_reg); |
147 | 144 | ||
145 | temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask; | ||
146 | WREG32(rec->mask_data_reg, temp); | ||
148 | temp = RREG32(rec->mask_data_reg); | 147 | temp = RREG32(rec->mask_data_reg); |
149 | if (lock_state) | 148 | |
150 | temp |= rec->mask_data_mask; | 149 | return 0; |
151 | else | 150 | } |
152 | temp &= ~rec->mask_data_mask; | 151 | |
152 | static void post_xfer(struct i2c_adapter *i2c_adap) | ||
153 | { | ||
154 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
155 | struct radeon_device *rdev = i2c->dev->dev_private; | ||
156 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | ||
157 | uint32_t temp; | ||
158 | |||
159 | /* unmask the gpio pins for software use */ | ||
160 | temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask; | ||
161 | WREG32(rec->mask_clk_reg, temp); | ||
162 | temp = RREG32(rec->mask_clk_reg); | ||
163 | |||
164 | temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask; | ||
153 | WREG32(rec->mask_data_reg, temp); | 165 | WREG32(rec->mask_data_reg, temp); |
154 | temp = RREG32(rec->mask_data_reg); | 166 | temp = RREG32(rec->mask_data_reg); |
155 | } | 167 | } |
@@ -209,22 +221,6 @@ static void set_data(void *i2c_priv, int data) | |||
209 | WREG32(rec->en_data_reg, val); | 221 | WREG32(rec->en_data_reg, val); |
210 | } | 222 | } |
211 | 223 | ||
212 | static int pre_xfer(struct i2c_adapter *i2c_adap) | ||
213 | { | ||
214 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
215 | |||
216 | radeon_i2c_do_lock(i2c, 1); | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static void post_xfer(struct i2c_adapter *i2c_adap) | ||
222 | { | ||
223 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
224 | |||
225 | radeon_i2c_do_lock(i2c, 0); | ||
226 | } | ||
227 | |||
228 | /* hw i2c */ | 224 | /* hw i2c */ |
229 | 225 | ||
230 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) | 226 | static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) |
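pre_xfer() and post_xfer() now perform the GPIO masking directly and match the hook signatures used by the kernel's generic bit-banging engine, so they plug into struct i2c_algo_bit_data next to the set_clock/set_data/get_clock/get_data helpers in this file. A sketch of the wiring; the timing values are illustrative:

    #include <linux/i2c.h>
    #include <linux/i2c-algo-bit.h>

    static struct i2c_algo_bit_data radeon_bit_data_sketch = {
            .pre_xfer  = pre_xfer,   /* mask GPIO pins for software use */
            .post_xfer = post_xfer,  /* unmask them afterwards */
            .setscl    = set_clock,
            .setsda    = set_data,
            .getscl    = get_clock,
            .getsda    = get_data,
            .udelay    = 10,
            .timeout   = usecs_to_jiffies(2200),
    };
    /* the hooks find their radeon_i2c_chan via i2c_set_adapdata()
     * on the owning adapter */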
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 465746bd51b7..00da38424dfc 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
@@ -129,7 +129,7 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc) | |||
129 | } | 129 | } |
130 | } | 130 | } |
131 | 131 | ||
132 | static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int) | 132 | static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int) |
133 | { | 133 | { |
134 | u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS); | 134 | u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS); |
135 | u32 irq_mask = RADEON_SW_INT_TEST; | 135 | u32 irq_mask = RADEON_SW_INT_TEST; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index c7b6cb428d09..b37ec0f1413a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
@@ -864,7 +864,7 @@ void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder, | |||
864 | *v_sync_strt_wid = tmp; | 864 | *v_sync_strt_wid = tmp; |
865 | } | 865 | } |
866 | 866 | ||
867 | static inline int get_post_div(int value) | 867 | static int get_post_div(int value) |
868 | { | 868 | { |
869 | int post_div; | 869 | int post_div; |
870 | switch (value) { | 870 | switch (value) { |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 68820f5f6303..ed0178f03235 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -447,6 +447,7 @@ struct radeon_connector { | |||
447 | struct edid *edid; | 447 | struct edid *edid; |
448 | void *con_priv; | 448 | void *con_priv; |
449 | bool dac_load_detect; | 449 | bool dac_load_detect; |
450 | bool detected_by_load; /* if the connection status was determined by load */ | ||
450 | uint16_t connector_object_id; | 451 | uint16_t connector_object_id; |
451 | struct radeon_hpd hpd; | 452 | struct radeon_hpd hpd; |
452 | struct radeon_router router; | 453 | struct radeon_router router; |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 976c3b1b1b6e..1c851521f458 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -515,3 +515,44 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
515 | } | 515 | } |
516 | return 0; | 516 | return 0; |
517 | } | 517 | } |
518 | |||
519 | int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) | ||
520 | { | ||
521 | int r; | ||
522 | |||
523 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
524 | if (unlikely(r != 0)) | ||
525 | return r; | ||
526 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
527 | if (mem_type) | ||
528 | *mem_type = bo->tbo.mem.mem_type; | ||
529 | if (bo->tbo.sync_obj) | ||
530 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); | ||
531 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
532 | ttm_bo_unreserve(&bo->tbo); | ||
533 | return r; | ||
534 | } | ||
535 | |||
536 | |||
537 | /** | ||
538 | * radeon_bo_reserve - reserve bo | ||
539 | * @bo: bo structure | ||
540 | * @no_wait: don't sleep while trying to reserve (return -EBUSY) | ||
541 | * | ||
542 | * Returns: | ||
543 | * -EBUSY: buffer is busy and @no_wait is true | ||
544 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | ||
545 | * a signal. Release all buffer reservations and return to user-space. | ||
546 | */ | ||
547 | int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) | ||
548 | { | ||
549 | int r; | ||
550 | |||
551 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
552 | if (unlikely(r != 0)) { | ||
553 | if (r != -ERESTARTSYS) | ||
554 | dev_err(bo->rdev->dev, "%p reserve failed\n", bo); | ||
555 | return r; | ||
556 | } | ||
557 | return 0; | ||
558 | } | ||
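Moved out of line, radeon_bo_reserve() and radeon_bo_wait() keep the usual TTM discipline: reserve, operate, unreserve, with radeon_bo_wait() doing its own reserve/unreserve pair internally as shown above. A minimal usage sketch:

    /* Sketch: typical caller pattern around the two helpers. */
    static int radeon_bo_touch_then_sync(struct radeon_bo *bo)
    {
            u32 mem_type;
            int r;

            r = radeon_bo_reserve(bo, false);
            if (r)
                    return r;       /* e.g. -ERESTARTSYS */
            /* ... pin, map, or otherwise manipulate the bo ... */
            radeon_bo_unreserve(bo);

            /* block until any pending GPU access has finished */
            return radeon_bo_wait(bo, &mem_type, false);
    }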
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index ede6c13628f2..b07f0f9b8627 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -52,28 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type) | |||
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
55 | /** | 55 | int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait); |
56 | * radeon_bo_reserve - reserve bo | ||
57 | * @bo: bo structure | ||
58 | * @no_wait: don't sleep while trying to reserve (return -EBUSY) | ||
59 | * | ||
60 | * Returns: | ||
61 | * -EBUSY: buffer is busy and @no_wait is true | ||
62 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | ||
63 | * a signal. Release all buffer reservations and return to user-space. | ||
64 | */ | ||
65 | static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) | ||
66 | { | ||
67 | int r; | ||
68 | |||
69 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
70 | if (unlikely(r != 0)) { | ||
71 | if (r != -ERESTARTSYS) | ||
72 | dev_err(bo->rdev->dev, "%p reserve failed\n", bo); | ||
73 | return r; | ||
74 | } | ||
75 | return 0; | ||
76 | } | ||
77 | 56 | ||
78 | static inline void radeon_bo_unreserve(struct radeon_bo *bo) | 57 | static inline void radeon_bo_unreserve(struct radeon_bo *bo) |
79 | { | 58 | { |
@@ -118,23 +97,8 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) | |||
118 | return bo->tbo.addr_space_offset; | 97 | return bo->tbo.addr_space_offset; |
119 | } | 98 | } |
120 | 99 | ||
121 | static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | 100 | extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, |
122 | bool no_wait) | 101 | bool no_wait); |
123 | { | ||
124 | int r; | ||
125 | |||
126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
127 | if (unlikely(r != 0)) | ||
128 | return r; | ||
129 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
130 | if (mem_type) | ||
131 | *mem_type = bo->tbo.mem.mem_type; | ||
132 | if (bo->tbo.sync_obj) | ||
133 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); | ||
134 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
135 | ttm_bo_unreserve(&bo->tbo); | ||
136 | return r; | ||
137 | } | ||
138 | 102 | ||
139 | extern int radeon_bo_create(struct radeon_device *rdev, | 103 | extern int radeon_bo_create(struct radeon_device *rdev, |
140 | unsigned long size, int byte_align, | 104 | unsigned long size, int byte_align, |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 08c0233db1b8..49d58202202c 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -35,6 +35,44 @@ | |||
35 | 35 | ||
36 | int radeon_debugfs_ib_init(struct radeon_device *rdev); | 36 | int radeon_debugfs_ib_init(struct radeon_device *rdev); |
37 | 37 | ||
38 | u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) | ||
39 | { | ||
40 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
41 | u32 pg_idx, pg_offset; | ||
42 | u32 idx_value = 0; | ||
43 | int new_page; | ||
44 | |||
45 | pg_idx = (idx * 4) / PAGE_SIZE; | ||
46 | pg_offset = (idx * 4) % PAGE_SIZE; | ||
47 | |||
48 | if (ibc->kpage_idx[0] == pg_idx) | ||
49 | return ibc->kpage[0][pg_offset/4]; | ||
50 | if (ibc->kpage_idx[1] == pg_idx) | ||
51 | return ibc->kpage[1][pg_offset/4]; | ||
52 | |||
53 | new_page = radeon_cs_update_pages(p, pg_idx); | ||
54 | if (new_page < 0) { | ||
55 | p->parser_error = new_page; | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | idx_value = ibc->kpage[new_page][pg_offset/4]; | ||
60 | return idx_value; | ||
61 | } | ||
62 | |||
63 | void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | ||
64 | { | ||
65 | #if DRM_DEBUG_CODE | ||
66 | if (rdev->cp.count_dw <= 0) { | ||
67 | DRM_ERROR("radeon: writting more dword to ring than expected !\n"); | ||
68 | } | ||
69 | #endif | ||
70 | rdev->cp.ring[rdev->cp.wptr++] = v; | ||
71 | rdev->cp.wptr &= rdev->cp.ptr_mask; | ||
72 | rdev->cp.count_dw--; | ||
73 | rdev->cp.ring_free_dw--; | ||
74 | } | ||
75 | |||
38 | void radeon_ib_bogus_cleanup(struct radeon_device *rdev) | 76 | void radeon_ib_bogus_cleanup(struct radeon_device *rdev) |
39 | { | 77 | { |
40 | struct radeon_ib *ib, *n; | 78 | struct radeon_ib *ib, *n; |
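Both helpers above used to be inlines; radeon_get_ib_value() keeps a two-entry page cache for the IB chunk, and radeon_ring_write() relies on the ring size being a power of two so that masking with ptr_mask wraps the write pointer without a branch. A standalone illustration of the masking:

    #include <stdio.h>

    #define RING_DW  8                  /* power-of-two ring size, in dwords */
    #define PTR_MASK (RING_DW - 1)

    int main(void)
    {
            unsigned ring[RING_DW] = {0};
            unsigned wptr = 6;

            for (unsigned v = 0; v < 4; v++) {
                    ring[wptr++] = v;
                    wptr &= PTR_MASK;   /* wptr becomes 7, 0, 1, 2: wraps past 8 */
            }
            printf("wptr ended at %u\n", wptr); /* prints 2 */
            return 0;
    }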
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 92e7ea73b7c5..e8422ae7fe74 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -272,12 +272,12 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | 274 | ||
275 | static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | 275 | static int radeon_check_and_fixup_packet3(drm_radeon_private_t * |
276 | dev_priv, | 276 | dev_priv, |
277 | struct drm_file *file_priv, | 277 | struct drm_file *file_priv, |
278 | drm_radeon_kcmd_buffer_t * | 278 | drm_radeon_kcmd_buffer_t * |
279 | cmdbuf, | 279 | cmdbuf, |
280 | unsigned int *cmdsz) | 280 | unsigned int *cmdsz) |
281 | { | 281 | { |
282 | u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); | 282 | u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
283 | u32 offset, narrays; | 283 | u32 offset, narrays; |
@@ -446,8 +446,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
446 | * CP hardware state programming functions | 446 | * CP hardware state programming functions |
447 | */ | 447 | */ |
448 | 448 | ||
449 | static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, | 449 | static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, |
450 | struct drm_clip_rect * box) | 450 | struct drm_clip_rect * box) |
451 | { | 451 | { |
452 | RING_LOCALS; | 452 | RING_LOCALS; |
453 | 453 | ||
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index aa6a66eeb4ec..89a6e1ecea8d 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -182,6 +182,9 @@ int rs400_gart_enable(struct radeon_device *rdev) | |||
182 | /* Enable gart */ | 182 | /* Enable gart */ |
183 | WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); | 183 | WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); |
184 | rs400_gart_tlb_flush(rdev); | 184 | rs400_gart_tlb_flush(rdev); |
185 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
186 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
187 | (unsigned long long)rdev->gart.table_addr); | ||
185 | rdev->gart.ready = true; | 188 | rdev->gart.ready = true; |
186 | return 0; | 189 | return 0; |
187 | } | 190 | } |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4b5d0e6974a8..9320dd6404f6 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -484,6 +484,9 @@ static int rs600_gart_enable(struct radeon_device *rdev) | |||
484 | tmp = RREG32_MC(R_000009_MC_CNTL1); | 484 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
485 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); | 485 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); |
486 | rs600_gart_tlb_flush(rdev); | 486 | rs600_gart_tlb_flush(rdev); |
487 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
488 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
489 | (unsigned long long)rdev->gart.table_addr); | ||
487 | rdev->gart.ready = true; | 490 | rdev->gart.ready = true; |
488 | return 0; | 491 | return 0; |
489 | } | 492 | } |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b13c2eedc321..87cc1feee3ac 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -161,6 +161,9 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
161 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | 161 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); |
162 | 162 | ||
163 | r600_pcie_gart_tlb_flush(rdev); | 163 | r600_pcie_gart_tlb_flush(rdev); |
164 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
165 | (unsigned)(rdev->mc.gtt_size >> 20), | ||
166 | (unsigned long long)rdev->gart.table_addr); | ||
164 | rdev->gart.ready = true; | 167 | rdev->gart.ready = true; |
165 | return 0; | 168 | return 0; |
166 | } | 169 | } |
@@ -1184,8 +1187,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
1184 | 1187 | ||
1185 | int rv770_suspend(struct radeon_device *rdev) | 1188 | int rv770_suspend(struct radeon_device *rdev) |
1186 | { | 1189 | { |
1187 | int r; | ||
1188 | |||
1189 | r600_audio_fini(rdev); | 1190 | r600_audio_fini(rdev); |
1190 | /* FIXME: we should wait for ring to be empty */ | 1191 | /* FIXME: we should wait for ring to be empty */ |
1191 | r700_cp_stop(rdev); | 1192 | r700_cp_stop(rdev); |
@@ -1193,14 +1194,8 @@ int rv770_suspend(struct radeon_device *rdev) | |||
1193 | r600_irq_suspend(rdev); | 1194 | r600_irq_suspend(rdev); |
1194 | radeon_wb_disable(rdev); | 1195 | radeon_wb_disable(rdev); |
1195 | rv770_pcie_gart_disable(rdev); | 1196 | rv770_pcie_gart_disable(rdev); |
1196 | /* unpin shaders bo */ | 1197 | r600_blit_suspend(rdev); |
1197 | if (rdev->r600_blit.shader_obj) { | 1198 | |
1198 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
1199 | if (likely(r == 0)) { | ||
1200 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
1201 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1202 | } | ||
1203 | } | ||
1204 | return 0; | 1199 | return 0; |
1205 | } | 1200 | } |
1206 | 1201 | ||
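rv770_suspend() now delegates the shader BO teardown to r600_blit_suspend(). Judging from the removed lines, the helper presumably centralizes exactly this pattern (a sketch mirroring the deleted code, not the helper's verbatim body):

    /* unpin the blit shader bo, tolerating a failed reserve */
    void r600_blit_suspend_sketch(struct radeon_device *rdev)
    {
            int r;

            if (rdev->r600_blit.shader_obj) {
                    r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
                    if (likely(r == 0)) {
                            radeon_bo_unpin(rdev->r600_blit.shader_obj);
                            radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                    }
            }
    }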
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ef06194c5aa6..617b64678fc6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1293,6 +1293,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev, | |||
1293 | 1293 | ||
1294 | return ret; | 1294 | return ret; |
1295 | } | 1295 | } |
1296 | EXPORT_SYMBOL(ttm_bo_create); | ||
1296 | 1297 | ||
1297 | static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | 1298 | static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, |
1298 | unsigned mem_type, bool allow_errors) | 1299 | unsigned mem_type, bool allow_errors) |
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 30ad13344f7b..794ff67c5701 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig | |||
@@ -7,7 +7,8 @@ config DRM_VMWGFX | |||
7 | select FB_CFB_IMAGEBLIT | 7 | select FB_CFB_IMAGEBLIT |
8 | select DRM_TTM | 8 | select DRM_TTM |
9 | help | 9 | help |
10 | KMS enabled DRM driver for SVGA2 virtual hardware. | 10 | Choose this option if you would like to run 3D acceleration |
11 | 11 | in a VMware virtual machine. | |
12 | If unsure say n. The compiled module will be | 12 | This is a KMS enabled DRM driver for the VMware SVGA2 |
13 | called vmwgfx.ko | 13 | virtual hardware. |
14 | The compiled module will be called "vmwgfx.ko". | ||
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index c9281a1b1d3b..586869c8c11f 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile | |||
@@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm | |||
4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | 4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ |
5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ | 5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ |
6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ | 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ |
7 | vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o | 7 | vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ |
8 | vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o | ||
8 | 9 | ||
9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 10 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index 77cb45331000..d0e085ee8249 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h | |||
@@ -57,7 +57,8 @@ typedef enum { | |||
57 | SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1), | 57 | SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1), |
58 | SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4), | 58 | SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4), |
59 | SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0), | 59 | SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0), |
60 | SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1, | 60 | SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1), |
61 | SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1, | ||
61 | } SVGA3dHardwareVersion; | 62 | } SVGA3dHardwareVersion; |
62 | 63 | ||
63 | /* | 64 | /* |
@@ -67,7 +68,8 @@ typedef enum { | |||
67 | typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ | 68 | typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ |
68 | #define SVGA3D_NUM_CLIPPLANES 6 | 69 | #define SVGA3D_NUM_CLIPPLANES 6 |
69 | #define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8 | 70 | #define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8 |
70 | 71 | #define SVGA3D_MAX_CONTEXT_IDS 256 | |
72 | #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) | ||
71 | 73 | ||
72 | /* | 74 | /* |
73 | * Surface formats. | 75 | * Surface formats. |
@@ -79,76 +81,91 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ | |||
79 | */ | 81 | */ |
80 | 82 | ||
81 | typedef enum SVGA3dSurfaceFormat { | 83 | typedef enum SVGA3dSurfaceFormat { |
82 | SVGA3D_FORMAT_INVALID = 0, | 84 | SVGA3D_FORMAT_INVALID = 0, |
83 | 85 | ||
84 | SVGA3D_X8R8G8B8 = 1, | 86 | SVGA3D_X8R8G8B8 = 1, |
85 | SVGA3D_A8R8G8B8 = 2, | 87 | SVGA3D_A8R8G8B8 = 2, |
86 | 88 | ||
87 | SVGA3D_R5G6B5 = 3, | 89 | SVGA3D_R5G6B5 = 3, |
88 | SVGA3D_X1R5G5B5 = 4, | 90 | SVGA3D_X1R5G5B5 = 4, |
89 | SVGA3D_A1R5G5B5 = 5, | 91 | SVGA3D_A1R5G5B5 = 5, |
90 | SVGA3D_A4R4G4B4 = 6, | 92 | SVGA3D_A4R4G4B4 = 6, |
91 | 93 | ||
92 | SVGA3D_Z_D32 = 7, | 94 | SVGA3D_Z_D32 = 7, |
93 | SVGA3D_Z_D16 = 8, | 95 | SVGA3D_Z_D16 = 8, |
94 | SVGA3D_Z_D24S8 = 9, | 96 | SVGA3D_Z_D24S8 = 9, |
95 | SVGA3D_Z_D15S1 = 10, | 97 | SVGA3D_Z_D15S1 = 10, |
96 | 98 | ||
97 | SVGA3D_LUMINANCE8 = 11, | 99 | SVGA3D_LUMINANCE8 = 11, |
98 | SVGA3D_LUMINANCE4_ALPHA4 = 12, | 100 | SVGA3D_LUMINANCE4_ALPHA4 = 12, |
99 | SVGA3D_LUMINANCE16 = 13, | 101 | SVGA3D_LUMINANCE16 = 13, |
100 | SVGA3D_LUMINANCE8_ALPHA8 = 14, | 102 | SVGA3D_LUMINANCE8_ALPHA8 = 14, |
101 | 103 | ||
102 | SVGA3D_DXT1 = 15, | 104 | SVGA3D_DXT1 = 15, |
103 | SVGA3D_DXT2 = 16, | 105 | SVGA3D_DXT2 = 16, |
104 | SVGA3D_DXT3 = 17, | 106 | SVGA3D_DXT3 = 17, |
105 | SVGA3D_DXT4 = 18, | 107 | SVGA3D_DXT4 = 18, |
106 | SVGA3D_DXT5 = 19, | 108 | SVGA3D_DXT5 = 19, |
107 | 109 | ||
108 | SVGA3D_BUMPU8V8 = 20, | 110 | SVGA3D_BUMPU8V8 = 20, |
109 | SVGA3D_BUMPL6V5U5 = 21, | 111 | SVGA3D_BUMPL6V5U5 = 21, |
110 | SVGA3D_BUMPX8L8V8U8 = 22, | 112 | SVGA3D_BUMPX8L8V8U8 = 22, |
111 | SVGA3D_BUMPL8V8U8 = 23, | 113 | SVGA3D_BUMPL8V8U8 = 23, |
112 | 114 | ||
113 | SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */ | 115 | SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */ |
114 | SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */ | 116 | SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */ |
115 | 117 | ||
116 | SVGA3D_A2R10G10B10 = 26, | 118 | SVGA3D_A2R10G10B10 = 26, |
117 | 119 | ||
118 | /* signed formats */ | 120 | /* signed formats */ |
119 | SVGA3D_V8U8 = 27, | 121 | SVGA3D_V8U8 = 27, |
120 | SVGA3D_Q8W8V8U8 = 28, | 122 | SVGA3D_Q8W8V8U8 = 28, |
121 | SVGA3D_CxV8U8 = 29, | 123 | SVGA3D_CxV8U8 = 29, |
122 | 124 | ||
123 | /* mixed formats */ | 125 | /* mixed formats */ |
124 | SVGA3D_X8L8V8U8 = 30, | 126 | SVGA3D_X8L8V8U8 = 30, |
125 | SVGA3D_A2W10V10U10 = 31, | 127 | SVGA3D_A2W10V10U10 = 31, |
126 | 128 | ||
127 | SVGA3D_ALPHA8 = 32, | 129 | SVGA3D_ALPHA8 = 32, |
128 | 130 | ||
129 | /* Single- and dual-component floating point formats */ | 131 | /* Single- and dual-component floating point formats */ |
130 | SVGA3D_R_S10E5 = 33, | 132 | SVGA3D_R_S10E5 = 33, |
131 | SVGA3D_R_S23E8 = 34, | 133 | SVGA3D_R_S23E8 = 34, |
132 | SVGA3D_RG_S10E5 = 35, | 134 | SVGA3D_RG_S10E5 = 35, |
133 | SVGA3D_RG_S23E8 = 36, | 135 | SVGA3D_RG_S23E8 = 36, |
134 | 136 | ||
135 | /* | 137 | /* |
136 | * Any surface can be used as a buffer object, but SVGA3D_BUFFER is | 138 | * Any surface can be used as a buffer object, but SVGA3D_BUFFER is |
137 | * the most efficient format to use when creating new surfaces | 139 | * the most efficient format to use when creating new surfaces |
138 | * expressly for index or vertex data. | 140 | * expressly for index or vertex data. |
139 | */ | 141 | */ |
140 | SVGA3D_BUFFER = 37, | ||
141 | 142 | ||
142 | SVGA3D_Z_D24X8 = 38, | 143 | SVGA3D_BUFFER = 37, |
144 | |||
145 | SVGA3D_Z_D24X8 = 38, | ||
143 | 146 | ||
144 | SVGA3D_V16U16 = 39, | 147 | SVGA3D_V16U16 = 39, |
145 | 148 | ||
146 | SVGA3D_G16R16 = 40, | 149 | SVGA3D_G16R16 = 40, |
147 | SVGA3D_A16B16G16R16 = 41, | 150 | SVGA3D_A16B16G16R16 = 41, |
148 | 151 | ||
149 | /* Packed Video formats */ | 152 | /* Packed Video formats */ |
150 | SVGA3D_UYVY = 42, | 153 | SVGA3D_UYVY = 42, |
151 | SVGA3D_YUY2 = 43, | 154 | SVGA3D_YUY2 = 43, |
155 | |||
156 | /* Planar video formats */ | ||
157 | SVGA3D_NV12 = 44, | ||
158 | |||
159 | /* Video format with alpha */ | ||
160 | SVGA3D_AYUV = 45, | ||
161 | |||
162 | SVGA3D_BC4_UNORM = 108, | ||
163 | SVGA3D_BC5_UNORM = 111, | ||
164 | |||
165 | /* Advanced D3D9 depth formats. */ | ||
166 | SVGA3D_Z_DF16 = 118, | ||
167 | SVGA3D_Z_DF24 = 119, | ||
168 | SVGA3D_Z_D24S8_INT = 120, | ||
152 | 169 | ||
153 | SVGA3D_FORMAT_MAX | 170 | SVGA3D_FORMAT_MAX |
154 | } SVGA3dSurfaceFormat; | 171 | } SVGA3dSurfaceFormat; |
@@ -414,10 +431,20 @@ typedef enum { | |||
414 | SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */ | 431 | SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */ |
415 | SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */ | 432 | SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */ |
416 | SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */ | 433 | SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */ |
434 | SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */ | ||
435 | SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */ | ||
436 | SVGA3D_RS_LINEWIDTH = 99, /* float */ | ||
417 | SVGA3D_RS_MAX | 437 | SVGA3D_RS_MAX |
418 | } SVGA3dRenderStateName; | 438 | } SVGA3dRenderStateName; |
419 | 439 | ||
420 | typedef enum { | 440 | typedef enum { |
441 | SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0, | ||
442 | SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1, | ||
443 | SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2, | ||
444 | SVGA3D_TRANSPARENCYANTIALIAS_MAX | ||
445 | } SVGA3dTransparencyAntialiasType; | ||
446 | |||
447 | typedef enum { | ||
421 | SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */ | 448 | SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */ |
422 | SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */ | 449 | SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */ |
423 | SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */ | 450 | SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */ |
@@ -728,10 +755,10 @@ typedef enum { | |||
728 | SVGA3D_TEX_FILTER_NEAREST = 1, | 755 | SVGA3D_TEX_FILTER_NEAREST = 1, |
729 | SVGA3D_TEX_FILTER_LINEAR = 2, | 756 | SVGA3D_TEX_FILTER_LINEAR = 2, |
730 | SVGA3D_TEX_FILTER_ANISOTROPIC = 3, | 757 | SVGA3D_TEX_FILTER_ANISOTROPIC = 3, |
731 | SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented | 758 | SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */ |
732 | SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented | 759 | SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */ |
733 | SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented | 760 | SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */ |
734 | SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented | 761 | SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */ |
735 | SVGA3D_TEX_FILTER_MAX | 762 | SVGA3D_TEX_FILTER_MAX |
736 | } SVGA3dTextureFilter; | 763 | } SVGA3dTextureFilter; |
737 | 764 | ||
@@ -799,19 +826,19 @@ typedef enum { | |||
799 | 826 | ||
800 | typedef enum { | 827 | typedef enum { |
801 | SVGA3D_DECLUSAGE_POSITION = 0, | 828 | SVGA3D_DECLUSAGE_POSITION = 0, |
802 | SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1 | 829 | SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */ |
803 | SVGA3D_DECLUSAGE_BLENDINDICES, // 2 | 830 | SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */ |
804 | SVGA3D_DECLUSAGE_NORMAL, // 3 | 831 | SVGA3D_DECLUSAGE_NORMAL, /* 3 */ |
805 | SVGA3D_DECLUSAGE_PSIZE, // 4 | 832 | SVGA3D_DECLUSAGE_PSIZE, /* 4 */ |
806 | SVGA3D_DECLUSAGE_TEXCOORD, // 5 | 833 | SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */ |
807 | SVGA3D_DECLUSAGE_TANGENT, // 6 | 834 | SVGA3D_DECLUSAGE_TANGENT, /* 6 */ |
808 | SVGA3D_DECLUSAGE_BINORMAL, // 7 | 835 | SVGA3D_DECLUSAGE_BINORMAL, /* 7 */ |
809 | SVGA3D_DECLUSAGE_TESSFACTOR, // 8 | 836 | SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */ |
810 | SVGA3D_DECLUSAGE_POSITIONT, // 9 | 837 | SVGA3D_DECLUSAGE_POSITIONT, /* 9 */ |
811 | SVGA3D_DECLUSAGE_COLOR, // 10 | 838 | SVGA3D_DECLUSAGE_COLOR, /* 10 */ |
812 | SVGA3D_DECLUSAGE_FOG, // 11 | 839 | SVGA3D_DECLUSAGE_FOG, /* 11 */ |
813 | SVGA3D_DECLUSAGE_DEPTH, // 12 | 840 | SVGA3D_DECLUSAGE_DEPTH, /* 12 */ |
814 | SVGA3D_DECLUSAGE_SAMPLE, // 13 | 841 | SVGA3D_DECLUSAGE_SAMPLE, /* 13 */ |
815 | SVGA3D_DECLUSAGE_MAX | 842 | SVGA3D_DECLUSAGE_MAX |
816 | } SVGA3dDeclUsage; | 843 | } SVGA3dDeclUsage; |
817 | 844 | ||
@@ -819,10 +846,10 @@ typedef enum { | |||
819 | SVGA3D_DECLMETHOD_DEFAULT = 0, | 846 | SVGA3D_DECLMETHOD_DEFAULT = 0, |
820 | SVGA3D_DECLMETHOD_PARTIALU, | 847 | SVGA3D_DECLMETHOD_PARTIALU, |
821 | SVGA3D_DECLMETHOD_PARTIALV, | 848 | SVGA3D_DECLMETHOD_PARTIALV, |
822 | SVGA3D_DECLMETHOD_CROSSUV, // Normal | 849 | SVGA3D_DECLMETHOD_CROSSUV, /* Normal */ |
823 | SVGA3D_DECLMETHOD_UV, | 850 | SVGA3D_DECLMETHOD_UV, |
824 | SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map | 851 | SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */ |
825 | SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map | 852 | SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */ |
826 | } SVGA3dDeclMethod; | 853 | } SVGA3dDeclMethod; |
827 | 854 | ||
828 | typedef enum { | 855 | typedef enum { |
@@ -930,7 +957,6 @@ typedef enum { | |||
930 | } SVGA3dCubeFace; | 957 | } SVGA3dCubeFace; |
931 | 958 | ||
932 | typedef enum { | 959 | typedef enum { |
933 | SVGA3D_SHADERTYPE_COMPILED_DX8 = 0, | ||
934 | SVGA3D_SHADERTYPE_VS = 1, | 960 | SVGA3D_SHADERTYPE_VS = 1, |
935 | SVGA3D_SHADERTYPE_PS = 2, | 961 | SVGA3D_SHADERTYPE_PS = 2, |
936 | SVGA3D_SHADERTYPE_MAX | 962 | SVGA3D_SHADERTYPE_MAX |
@@ -968,12 +994,18 @@ typedef enum { | |||
968 | } SVGA3dTransferType; | 994 | } SVGA3dTransferType; |
969 | 995 | ||
970 | /* | 996 | /* |
971 | * The maximum number vertex arrays we're guaranteed to support in | 997 | * The maximum number of vertex arrays we're guaranteed to support in |
972 | * SVGA_3D_CMD_DRAWPRIMITIVES. | 998 | * SVGA_3D_CMD_DRAWPRIMITIVES. |
973 | */ | 999 | */ |
974 | #define SVGA3D_MAX_VERTEX_ARRAYS 32 | 1000 | #define SVGA3D_MAX_VERTEX_ARRAYS 32 |
975 | 1001 | ||
976 | /* | 1002 | /* |
1003 | * The maximum number of primitive ranges we're guaranteed to support | ||
1004 | * in SVGA_3D_CMD_DRAWPRIMITIVES. | ||
1005 | */ | ||
1006 | #define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32 | ||
1007 | |||
1008 | /* | ||
977 | * Identifiers for commands in the command FIFO. | 1009 | * Identifiers for commands in the command FIFO. |
978 | * | 1010 | * |
979 | * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of | 1011 | * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of |
@@ -990,7 +1022,7 @@ typedef enum { | |||
990 | #define SVGA_3D_CMD_LEGACY_BASE 1000 | 1022 | #define SVGA_3D_CMD_LEGACY_BASE 1000 |
991 | #define SVGA_3D_CMD_BASE 1040 | 1023 | #define SVGA_3D_CMD_BASE 1040 |
992 | 1024 | ||
993 | #define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 | 1025 | #define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */ |
994 | #define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1 | 1026 | #define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1 |
995 | #define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2 | 1027 | #define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2 |
996 | #define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3 | 1028 | #define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3 |
@@ -1008,7 +1040,7 @@ typedef enum { | |||
1008 | #define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15 | 1040 | #define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15 |
1009 | #define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16 | 1041 | #define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16 |
1010 | #define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17 | 1042 | #define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17 |
1011 | #define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated | 1043 | #define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */ |
1012 | #define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19 | 1044 | #define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19 |
1013 | #define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20 | 1045 | #define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20 |
1014 | #define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21 | 1046 | #define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21 |
@@ -1018,9 +1050,13 @@ typedef enum { | |||
1018 | #define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25 | 1050 | #define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25 |
1019 | #define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26 | 1051 | #define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26 |
1020 | #define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27 | 1052 | #define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27 |
1021 | #define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated | 1053 | #define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */ |
1022 | #define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29 | 1054 | #define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29 |
1023 | #define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30 | 1055 | #define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30 |
1056 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 | ||
1057 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 | ||
1058 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 | ||
1059 | #define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 | ||
1024 | 1060 | ||
1025 | #define SVGA_3D_CMD_FUTURE_MAX 2000 | 1061 | #define SVGA_3D_CMD_FUTURE_MAX 2000 |
1026 | 1062 | ||
@@ -1031,9 +1067,9 @@ typedef enum { | |||
1031 | typedef struct { | 1067 | typedef struct { |
1032 | union { | 1068 | union { |
1033 | struct { | 1069 | struct { |
1034 | uint16 function; // SVGA3dFogFunction | 1070 | uint16 function; /* SVGA3dFogFunction */ |
1035 | uint8 type; // SVGA3dFogType | 1071 | uint8 type; /* SVGA3dFogType */ |
1036 | uint8 base; // SVGA3dFogBase | 1072 | uint8 base; /* SVGA3dFogBase */ |
1037 | }; | 1073 | }; |
1038 | uint32 uintValue; | 1074 | uint32 uintValue; |
1039 | }; | 1075 | }; |
@@ -1109,6 +1145,8 @@ typedef enum { | |||
1109 | SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6), | 1145 | SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6), |
1110 | SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7), | 1146 | SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7), |
1111 | SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8), | 1147 | SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8), |
1148 | SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9), | ||
1149 | SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10), | ||
1112 | } SVGA3dSurfaceFlags; | 1150 | } SVGA3dSurfaceFlags; |
1113 | 1151 | ||
1114 | typedef | 1152 | typedef |
@@ -1121,6 +1159,12 @@ struct { | |||
1121 | uint32 sid; | 1159 | uint32 sid; |
1122 | SVGA3dSurfaceFlags surfaceFlags; | 1160 | SVGA3dSurfaceFlags surfaceFlags; |
1123 | SVGA3dSurfaceFormat format; | 1161 | SVGA3dSurfaceFormat format; |
1162 | /* | ||
1163 | * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace | ||
1164 | * structures must have the same value in their numMipLevels field. | ||
1165 | * Otherwise, all but the first SVGA3dSurfaceFace structure must have | ||
1166 | * numMipLevels set to 0. | ||
1167 | */ | ||
1124 | SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; | 1168 | SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; |
1125 | /* | 1169 | /* |
1126 | * Followed by an SVGA3dSize structure for each mip level in each face. | 1170 | * Followed by an SVGA3dSize structure for each mip level in each face. |
@@ -1135,6 +1179,31 @@ struct { | |||
1135 | 1179 | ||
1136 | typedef | 1180 | typedef |
1137 | struct { | 1181 | struct { |
1182 | uint32 sid; | ||
1183 | SVGA3dSurfaceFlags surfaceFlags; | ||
1184 | SVGA3dSurfaceFormat format; | ||
1185 | /* | ||
1186 | * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace | ||
1187 | * structures must have the same value in their numMipLevels field. | ||
1188 | * Otherwise, all but the first SVGA3dSurfaceFace structure must have | ||
1189 | * numMipLevels set to 0. | ||
1190 | */ | ||
1191 | SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; | ||
1192 | uint32 multisampleCount; | ||
1193 | SVGA3dTextureFilter autogenFilter; | ||
1194 | /* | ||
1195 | * Followed by an SVGA3dSize structure for each mip level in each face. | ||
1196 | * | ||
1197 | * A note on surface sizes: Sizes are always specified in pixels, | ||
1198 | * even if the true surface size is not a multiple of the minimum | ||
1199 | * block size of the surface's format. For example, a 3x3x1 DXT1 | ||
1200 | * compressed texture would actually be stored as a 4x4x1 image in | ||
1201 | * memory. | ||
1202 | */ | ||
1203 | } SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */ | ||
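As a quick illustration of the numMipLevels rule above (an editorial sketch, not part of the patch; SVGA3dSurfaceFace and SVGA3D_MAX_SURFACE_FACES are defined earlier in this header):

static void set_faces(SVGA3dCmdDefineSurface_v2 *cmd,
                      bool cubemap, uint32 mipLevels)
{
   uint32 i;

   /* Non-cubemap surfaces use only face[0]; the rest must stay 0. */
   for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; ++i)
      cmd->face[i].numMipLevels = 0;

   if (cubemap) {
      /* Cubemaps: all faces must carry the same numMipLevels value. */
      for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; ++i)
         cmd->face[i].numMipLevels = mipLevels;
   } else {
      cmd->face[0].numMipLevels = mipLevels;
   }
}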
1204 | |||
1205 | typedef | ||
1206 | struct { | ||
1138 | uint32 sid; | 1207 | uint32 sid; |
1139 | } SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */ | 1208 | } SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */ |
1140 | 1209 | ||
@@ -1474,10 +1543,12 @@ struct { | |||
1474 | * SVGA3dCmdDrawPrimitives structure. In order, | 1543 | * SVGA3dCmdDrawPrimitives structure. In order, |
1475 | * they are: | 1544 | * they are: |
1476 | * | 1545 | * |
1477 | * 1. SVGA3dVertexDecl, quantity 'numVertexDecls' | 1546 | * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than |
1478 | * 2. SVGA3dPrimitiveRange, quantity 'numRanges' | 1547 | * SVGA3D_MAX_VERTEX_ARRAYS; |
1548 | * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than | ||
1549 | * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES; | ||
1479 | * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains | 1550 | * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains |
1480 | * the frequency divisor for this the corresponding vertex decl) | 1551 | * the frequency divisor for the corresponding vertex decl). |
1481 | */ | 1552 | */ |
1482 | } SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */ | 1553 | } SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */ |
1483 | 1554 | ||
@@ -1671,6 +1742,12 @@ struct { | |||
1671 | /* Clipping: zero or more SVGASignedRects follow */ | 1742 | /* Clipping: zero or more SVGASignedRects follow */ |
1672 | } SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */ | 1743 | } SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */ |
1673 | 1744 | ||
1745 | typedef | ||
1746 | struct { | ||
1747 | uint32 sid; | ||
1748 | SVGA3dTextureFilter filter; | ||
1749 | } SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */ | ||
1750 | |||
1674 | 1751 | ||
1675 | /* | 1752 | /* |
1676 | * Capability query index. | 1753 | * Capability query index. |
@@ -1774,6 +1851,32 @@ typedef enum { | |||
1774 | SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67, | 1851 | SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67, |
1775 | SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68, | 1852 | SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68, |
1776 | SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69, | 1853 | SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69, |
1854 | SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70, | ||
1855 | SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71, | ||
1856 | SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72, | ||
1857 | SVGA3D_DEVCAP_SUPERSAMPLE = 73, | ||
1858 | SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74, | ||
1859 | SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75, | ||
1860 | SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76, | ||
1861 | |||
1862 | /* | ||
1863 | * This is the maximum number of SVGA context IDs that the guest | ||
1864 | * can define using SVGA_3D_CMD_CONTEXT_DEFINE. | ||
1865 | */ | ||
1866 | SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77, | ||
1867 | |||
1868 | /* | ||
1869 | * This is the maximum number of SVGA surface IDs that the guest | ||
1870 | * can define using SVGA_3D_CMD_SURFACE_DEFINE*. | ||
1871 | */ | ||
1872 | SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78, | ||
1873 | |||
1874 | SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79, | ||
1875 | SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80, | ||
1876 | SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81, | ||
1877 | |||
1878 | SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82, | ||
1879 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, | ||
1777 | 1880 | ||
1778 | /* | 1881 | /* |
1779 | * Don't add new caps into the previous section; the values in this | 1882 | * Don't add new caps into the previous section; the values in this |
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h index 7b85e9b8c854..8e8d9682e018 100644 --- a/drivers/gpu/drm/vmwgfx/svga_escape.h +++ b/drivers/gpu/drm/vmwgfx/svga_escape.h | |||
@@ -75,7 +75,7 @@ | |||
75 | */ | 75 | */ |
76 | 76 | ||
77 | #define SVGA_ESCAPE_VMWARE_HINT 0x00030000 | 77 | #define SVGA_ESCAPE_VMWARE_HINT 0x00030000 |
78 | #define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated | 78 | #define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */ |
79 | 79 | ||
80 | typedef | 80 | typedef |
81 | struct { | 81 | struct { |
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h index f753d73c14b4..f38416fcb046 100644 --- a/drivers/gpu/drm/vmwgfx/svga_overlay.h +++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h | |||
@@ -38,9 +38,9 @@ | |||
38 | * Video formats we support | 38 | * Video formats we support |
39 | */ | 39 | */ |
40 | 40 | ||
41 | #define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2' | 41 | #define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */ |
42 | #define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2' | 42 | #define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */ |
43 | #define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y' | 43 | #define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */ |
44 | 44 | ||
45 | typedef enum { | 45 | typedef enum { |
46 | SVGA_OVERLAY_FORMAT_INVALID = 0, | 46 | SVGA_OVERLAY_FORMAT_INVALID = 0, |
@@ -68,7 +68,7 @@ struct SVGAEscapeVideoSetRegs { | |||
68 | uint32 streamId; | 68 | uint32 streamId; |
69 | } header; | 69 | } header; |
70 | 70 | ||
71 | // May include zero or more items. | 71 | /* May include zero or more items. */ |
72 | struct { | 72 | struct { |
73 | uint32 registerId; | 73 | uint32 registerId; |
74 | uint32 value; | 74 | uint32 value; |
@@ -134,12 +134,12 @@ struct { | |||
134 | */ | 134 | */ |
135 | 135 | ||
136 | static inline bool | 136 | static inline bool |
137 | VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN | 137 | VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */ |
138 | uint32 *width, // IN / OUT | 138 | uint32 *width, /* IN / OUT */ |
139 | uint32 *height, // IN / OUT | 139 | uint32 *height, /* IN / OUT */ |
140 | uint32 *size, // OUT | 140 | uint32 *size, /* OUT */ |
141 | uint32 *pitches, // OUT (optional) | 141 | uint32 *pitches, /* OUT (optional) */ |
142 | uint32 *offsets) // OUT (optional) | 142 | uint32 *offsets) /* OUT (optional) */ |
143 | { | 143 | { |
144 | int tmp; | 144 | int tmp; |
145 | 145 | ||
@@ -198,4 +198,4 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN | |||
198 | return true; | 198 | return true; |
199 | } | 199 | } |
200 | 200 | ||
201 | #endif // _SVGA_OVERLAY_H_ | 201 | #endif /* _SVGA_OVERLAY_H_ */ |
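A usage sketch for the helper above (editorial, not part of the patch); SVGA_OVERLAY_FORMAT_YV12 is one of the SVGAOverlayFormat enumerants in this header:

static uint32 yv12_frame_size(void)
{
   uint32 width = 640, height = 480, size = 0;
   uint32 pitches[3], offsets[3];

   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
                                 &width, &height, &size,
                                 pitches, offsets))
      return 0;   /* unrecognized format */

   return size;   /* bytes needed for one frame, all planes */
}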
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 1b96c2ec07dd..01f63cb49678 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h | |||
@@ -39,6 +39,15 @@ | |||
39 | #define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 | 39 | #define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * SVGA_REG_ENABLE bit definitions. | ||
43 | */ | ||
44 | #define SVGA_REG_ENABLE_DISABLE 0 | ||
45 | #define SVGA_REG_ENABLE_ENABLE 1 | ||
46 | #define SVGA_REG_ENABLE_HIDE 2 | ||
47 | #define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\ | ||
48 | SVGA_REG_ENABLE_HIDE) | ||
49 | |||
50 | /* | ||
42 | * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned | 51 | * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned |
43 | * cursor bypass mode. This is still supported, but no new guest | 52 | * cursor bypass mode. This is still supported, but no new guest |
44 | * drivers should use it. | 53 | * drivers should use it. |
@@ -158,7 +167,9 @@ enum { | |||
158 | SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44, | 167 | SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44, |
159 | 168 | ||
160 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ | 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ |
161 | SVGA_REG_TOP = 46, /* Must be 1 more than the last register */ | 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ |
171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ | ||
172 | SVGA_REG_TOP = 48, /* Must be 1 more than the last register */ | ||
162 | 173 | ||
163 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ | 174 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ |
164 | /* Next 768 (== 256*3) registers exist for colormap */ | 175 | /* Next 768 (== 256*3) registers exist for colormap */ |
@@ -265,7 +276,7 @@ enum { | |||
265 | * possible. | 276 | * possible. |
266 | */ | 277 | */ |
267 | #define SVGA_GMR_NULL ((uint32) -1) | 278 | #define SVGA_GMR_NULL ((uint32) -1) |
268 | #define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB) | 279 | #define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */ |
269 | 280 | ||
270 | typedef | 281 | typedef |
271 | struct SVGAGuestMemDescriptor { | 282 | struct SVGAGuestMemDescriptor { |
@@ -306,13 +317,35 @@ struct SVGAGMRImageFormat { | |||
306 | struct { | 317 | struct { |
307 | uint32 bitsPerPixel : 8; | 318 | uint32 bitsPerPixel : 8; |
308 | uint32 colorDepth : 8; | 319 | uint32 colorDepth : 8; |
309 | uint32 reserved : 16; // Must be zero | 320 | uint32 reserved : 16; /* Must be zero */ |
310 | }; | 321 | }; |
311 | 322 | ||
312 | uint32 value; | 323 | uint32 value; |
313 | }; | 324 | }; |
314 | } SVGAGMRImageFormat; | 325 | } SVGAGMRImageFormat; |
315 | 326 | ||
327 | typedef | ||
328 | struct SVGAGuestImage { | ||
329 | SVGAGuestPtr ptr; | ||
330 | |||
331 | /* | ||
332 | * A note on interpretation of pitch: This value of pitch is the | ||
333 | * number of bytes between vertically adjacent image | ||
334 | * blocks. Normally this is the number of bytes between the first | ||
335 | * pixel of two adjacent scanlines. With compressed textures, | ||
336 | * however, this may represent the number of bytes between | ||
337 | * compression blocks rather than between rows of pixels. | ||
338 | * | ||
339 | * XXX: Compressed textures currently must be tightly packed in guest memory. | ||
340 | * | ||
341 | * If the image is 1-dimensional, pitch is ignored. | ||
342 | * | ||
343 | * If 'pitch' is zero, the SVGA3D device calculates a pitch value | ||
344 | * assuming each row of blocks is tightly packed. | ||
345 | */ | ||
346 | uint32 pitch; | ||
347 | } SVGAGuestImage; | ||
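The pitch note is easiest to see with a block-compressed format. A sketch of the tightly packed pitch the device assumes when 'pitch' is zero, using standard DXT1/S3TC block geometry (4x4-pixel blocks, 8 bytes each; these are facts about the format, not definitions from this header):

static uint32 dxt1_tight_pitch(uint32 widthPixels)
{
   uint32 blocksPerRow = (widthPixels + 3) / 4;   /* round up to whole blocks */

   return blocksPerRow * 8;                       /* 8 bytes per DXT1 block */
}

/* E.g. a 3-pixel-wide DXT1 image has one block per row, so pitch == 8. */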
348 | |||
316 | /* | 349 | /* |
317 | * SVGAColorBGRX -- | 350 | * SVGAColorBGRX -- |
318 | * | 351 | * |
@@ -328,7 +361,7 @@ struct SVGAColorBGRX { | |||
328 | uint32 b : 8; | 361 | uint32 b : 8; |
329 | uint32 g : 8; | 362 | uint32 g : 8; |
330 | uint32 r : 8; | 363 | uint32 r : 8; |
331 | uint32 x : 8; // Unused | 364 | uint32 x : 8; /* Unused */ |
332 | }; | 365 | }; |
333 | 366 | ||
334 | uint32 value; | 367 | uint32 value; |
@@ -370,23 +403,34 @@ struct SVGASignedPoint { | |||
370 | * Note the holes in the bitfield. Missing bits have been deprecated, | 403 | * Note the holes in the bitfield. Missing bits have been deprecated, |
371 | * and must not be reused. Those capabilities will never be reported | 404 | * and must not be reused. Those capabilities will never be reported |
372 | * by new versions of the SVGA device. | 405 | * by new versions of the SVGA device. |
406 | * | ||
407 | * SVGA_CAP_GMR2 -- | ||
408 | * Provides asynchronous commands to define and remap guest memory | ||
409 | * regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and | ||
410 | * SVGA_REG_MEMORY_SIZE. | ||
411 | * | ||
412 | * SVGA_CAP_SCREEN_OBJECT_2 -- | ||
413 | * Allow screen object support, and require backing stores from the | ||
414 | * guest for each screen object. | ||
373 | */ | 415 | */ |
374 | 416 | ||
375 | #define SVGA_CAP_NONE 0x00000000 | 417 | #define SVGA_CAP_NONE 0x00000000 |
376 | #define SVGA_CAP_RECT_COPY 0x00000002 | 418 | #define SVGA_CAP_RECT_COPY 0x00000002 |
377 | #define SVGA_CAP_CURSOR 0x00000020 | 419 | #define SVGA_CAP_CURSOR 0x00000020 |
378 | #define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead) | 420 | #define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */ |
379 | #define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead) | 421 | #define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */ |
380 | #define SVGA_CAP_8BIT_EMULATION 0x00000100 | 422 | #define SVGA_CAP_8BIT_EMULATION 0x00000100 |
381 | #define SVGA_CAP_ALPHA_CURSOR 0x00000200 | 423 | #define SVGA_CAP_ALPHA_CURSOR 0x00000200 |
382 | #define SVGA_CAP_3D 0x00004000 | 424 | #define SVGA_CAP_3D 0x00004000 |
383 | #define SVGA_CAP_EXTENDED_FIFO 0x00008000 | 425 | #define SVGA_CAP_EXTENDED_FIFO 0x00008000 |
384 | #define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support | 426 | #define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */ |
385 | #define SVGA_CAP_PITCHLOCK 0x00020000 | 427 | #define SVGA_CAP_PITCHLOCK 0x00020000 |
386 | #define SVGA_CAP_IRQMASK 0x00040000 | 428 | #define SVGA_CAP_IRQMASK 0x00040000 |
387 | #define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support | 429 | #define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */ |
388 | #define SVGA_CAP_GMR 0x00100000 | 430 | #define SVGA_CAP_GMR 0x00100000 |
389 | #define SVGA_CAP_TRACES 0x00200000 | 431 | #define SVGA_CAP_TRACES 0x00200000 |
432 | #define SVGA_CAP_GMR2 0x00400000 | ||
433 | #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 | ||
390 | 434 | ||
391 | 435 | ||
392 | /* | 436 | /* |
@@ -431,7 +475,7 @@ enum { | |||
431 | 475 | ||
432 | SVGA_FIFO_CAPABILITIES = 4, | 476 | SVGA_FIFO_CAPABILITIES = 4, |
433 | SVGA_FIFO_FLAGS, | 477 | SVGA_FIFO_FLAGS, |
434 | // Valid with SVGA_FIFO_CAP_FENCE: | 478 | /* Valid with SVGA_FIFO_CAP_FENCE: */ |
435 | SVGA_FIFO_FENCE, | 479 | SVGA_FIFO_FENCE, |
436 | 480 | ||
437 | /* | 481 | /* |
@@ -444,33 +488,47 @@ enum { | |||
444 | * extended FIFO. | 488 | * extended FIFO. |
445 | */ | 489 | */ |
446 | 490 | ||
447 | // Valid if exists (i.e. if extended FIFO enabled): | 491 | /* Valid if exists (i.e. if extended FIFO enabled): */ |
448 | SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */ | 492 | SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */ |
449 | // Valid with SVGA_FIFO_CAP_PITCHLOCK: | 493 | /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */ |
450 | SVGA_FIFO_PITCHLOCK, | 494 | SVGA_FIFO_PITCHLOCK, |
451 | 495 | ||
452 | // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: | 496 | /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */ |
453 | SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */ | 497 | SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */ |
454 | SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */ | 498 | SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */ |
455 | SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */ | 499 | SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */ |
456 | SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */ | 500 | SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */ |
457 | SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */ | 501 | SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */ |
458 | 502 | ||
459 | // Valid with SVGA_FIFO_CAP_RESERVE: | 503 | /* Valid with SVGA_FIFO_CAP_RESERVE: */ |
460 | SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */ | 504 | SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */ |
461 | 505 | ||
462 | /* | 506 | /* |
463 | * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT: | 507 | * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2: |
464 | * | 508 | * |
465 | * By default this is SVGA_ID_INVALID, to indicate that the cursor | 509 | * By default this is SVGA_ID_INVALID, to indicate that the cursor |
466 | * coordinates are specified relative to the virtual root. If this | 510 | * coordinates are specified relative to the virtual root. If this |
467 | * is set to a specific screen ID, cursor position is reinterpreted | 511 | * is set to a specific screen ID, cursor position is reinterpreted |
468 | * as a signed offset relative to that screen's origin. This is the | 512 | * as a signed offset relative to that screen's origin. |
469 | * only way to place the cursor on a non-rooted screen. | ||
470 | */ | 513 | */ |
471 | SVGA_FIFO_CURSOR_SCREEN_ID, | 514 | SVGA_FIFO_CURSOR_SCREEN_ID, |
472 | 515 | ||
473 | /* | 516 | /* |
517 | * Valid with SVGA_FIFO_CAP_DEAD: | ||
518 | * | ||
519 | * An arbitrary value written by the host; drivers should not use it. | ||
520 | */ | ||
521 | SVGA_FIFO_DEAD, | ||
522 | |||
523 | /* | ||
524 | * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED: | ||
525 | * | ||
526 | * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h) | ||
527 | * on platforms that can enforce graphics resource limits. | ||
528 | */ | ||
529 | SVGA_FIFO_3D_HWVERSION_REVISED, | ||
530 | |||
531 | /* | ||
474 | * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new | 532 | * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new |
475 | * registers, but this must be done carefully and with judicious use of | 533 | * registers, but this must be done carefully and with judicious use of |
476 | * capability bits, since comparisons based on SVGA_FIFO_MIN aren't | 534 | * capability bits, since comparisons based on SVGA_FIFO_MIN aren't |
@@ -508,7 +566,7 @@ enum { | |||
508 | * sets SVGA_FIFO_MIN high enough to leave room for them. | 566 | * sets SVGA_FIFO_MIN high enough to leave room for them. |
509 | */ | 567 | */ |
510 | 568 | ||
511 | // Valid if register exists: | 569 | /* Valid if register exists: */ |
512 | SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */ | 570 | SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */ |
513 | SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */ | 571 | SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */ |
514 | SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */ | 572 | SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */ |
@@ -709,6 +767,37 @@ enum { | |||
709 | * | 767 | * |
710 | * - When a screen is resized, either using Screen Object commands or | 768 | * - When a screen is resized, either using Screen Object commands or |
711 | * legacy multimon registers, its contents are preserved. | 769 | * legacy multimon registers, its contents are preserved. |
770 | * | ||
771 | * SVGA_FIFO_CAP_GMR2 -- | ||
772 | * | ||
773 | * Provides new commands to define and remap guest memory regions (GMR). | ||
774 | * | ||
775 | * New 2D commands: | ||
776 | * DEFINE_GMR2, REMAP_GMR2. | ||
777 | * | ||
778 | * SVGA_FIFO_CAP_3D_HWVERSION_REVISED -- | ||
779 | * | ||
780 | * Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists. | ||
781 | * This register may replace SVGA_FIFO_3D_HWVERSION on platforms | ||
782 | * that enforce graphics resource limits. This allows the platform | ||
783 | * to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest | ||
784 | * drivers that do not limit their resources. | ||
785 | * | ||
786 | * Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators | ||
787 | * are codependent (and thus we use a single capability bit). | ||
788 | * | ||
789 | * SVGA_FIFO_CAP_SCREEN_OBJECT_2 -- | ||
790 | * | ||
791 | * Modifies the DEFINE_SCREEN command to include a guest provided | ||
792 | * backing store in GMR memory and the bytesPerLine for the backing | ||
793 | * store. This capability requires the use of a backing store when | ||
794 | * creating screen objects. However, if SVGA_FIFO_CAP_SCREEN_OBJECT | ||
795 | * is also present, backing stores are optional. | ||
796 | * | ||
797 | * SVGA_FIFO_CAP_DEAD -- | ||
798 | * | ||
799 | * Drivers should not use this cap bit. This cap bit cannot be | ||
800 | * reused since some hosts already expose it. | ||
712 | */ | 801 | */ |
713 | 802 | ||
714 | #define SVGA_FIFO_CAP_NONE 0 | 803 | #define SVGA_FIFO_CAP_NONE 0 |
@@ -720,6 +809,10 @@ enum { | |||
720 | #define SVGA_FIFO_CAP_ESCAPE (1<<5) | 809 | #define SVGA_FIFO_CAP_ESCAPE (1<<5) |
721 | #define SVGA_FIFO_CAP_RESERVE (1<<6) | 810 | #define SVGA_FIFO_CAP_RESERVE (1<<6) |
722 | #define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7) | 811 | #define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7) |
812 | #define SVGA_FIFO_CAP_GMR2 (1<<8) | ||
813 | #define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2 | ||
814 | #define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9) | ||
815 | #define SVGA_FIFO_CAP_DEAD (1<<10) | ||
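A minimal sketch of testing these bits, assuming 'fifo_mem' is the driver's mapped FIFO area indexed by the SVGA_FIFO_* enums:

static bool has_screen_object(const uint32 *fifo_mem)
{
   uint32 caps = fifo_mem[SVGA_FIFO_CAPABILITIES];

   /* Either flavor of Screen Object support will do. */
   return (caps & (SVGA_FIFO_CAP_SCREEN_OBJECT |
                   SVGA_FIFO_CAP_SCREEN_OBJECT_2)) != 0;
}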
723 | 816 | ||
724 | 817 | ||
725 | /* | 818 | /* |
@@ -730,7 +823,7 @@ enum { | |||
730 | 823 | ||
731 | #define SVGA_FIFO_FLAG_NONE 0 | 824 | #define SVGA_FIFO_FLAG_NONE 0 |
732 | #define SVGA_FIFO_FLAG_ACCELFRONT (1<<0) | 825 | #define SVGA_FIFO_FLAG_ACCELFRONT (1<<0) |
733 | #define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only | 826 | #define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */ |
734 | 827 | ||
735 | /* | 828 | /* |
736 | * FIFO reservation sentinel value | 829 | * FIFO reservation sentinel value |
@@ -763,22 +856,22 @@ enum { | |||
763 | SVGA_VIDEO_DATA_OFFSET, | 856 | SVGA_VIDEO_DATA_OFFSET, |
764 | SVGA_VIDEO_FORMAT, | 857 | SVGA_VIDEO_FORMAT, |
765 | SVGA_VIDEO_COLORKEY, | 858 | SVGA_VIDEO_COLORKEY, |
766 | SVGA_VIDEO_SIZE, // Deprecated | 859 | SVGA_VIDEO_SIZE, /* Deprecated */ |
767 | SVGA_VIDEO_WIDTH, | 860 | SVGA_VIDEO_WIDTH, |
768 | SVGA_VIDEO_HEIGHT, | 861 | SVGA_VIDEO_HEIGHT, |
769 | SVGA_VIDEO_SRC_X, | 862 | SVGA_VIDEO_SRC_X, |
770 | SVGA_VIDEO_SRC_Y, | 863 | SVGA_VIDEO_SRC_Y, |
771 | SVGA_VIDEO_SRC_WIDTH, | 864 | SVGA_VIDEO_SRC_WIDTH, |
772 | SVGA_VIDEO_SRC_HEIGHT, | 865 | SVGA_VIDEO_SRC_HEIGHT, |
773 | SVGA_VIDEO_DST_X, // Signed int32 | 866 | SVGA_VIDEO_DST_X, /* Signed int32 */ |
774 | SVGA_VIDEO_DST_Y, // Signed int32 | 867 | SVGA_VIDEO_DST_Y, /* Signed int32 */ |
775 | SVGA_VIDEO_DST_WIDTH, | 868 | SVGA_VIDEO_DST_WIDTH, |
776 | SVGA_VIDEO_DST_HEIGHT, | 869 | SVGA_VIDEO_DST_HEIGHT, |
777 | SVGA_VIDEO_PITCH_1, | 870 | SVGA_VIDEO_PITCH_1, |
778 | SVGA_VIDEO_PITCH_2, | 871 | SVGA_VIDEO_PITCH_2, |
779 | SVGA_VIDEO_PITCH_3, | 872 | SVGA_VIDEO_PITCH_3, |
780 | SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER | 873 | SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */ |
781 | SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID) | 874 | SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */ |
782 | SVGA_VIDEO_NUM_REGS | 875 | SVGA_VIDEO_NUM_REGS |
783 | }; | 876 | }; |
784 | 877 | ||
@@ -829,15 +922,51 @@ typedef struct SVGAOverlayUnit { | |||
829 | * compatibility. New flags can be added, and the struct may grow, | 922 | * compatibility. New flags can be added, and the struct may grow, |
830 | * but existing fields must retain their meaning. | 923 | * but existing fields must retain their meaning. |
831 | * | 924 | * |
925 | * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields: an | ||
926 | * SVGAGuestPtr that is used to back the screen contents. This | ||
927 | * memory must come from the GFB. The guest is not allowed to | ||
928 | * access the memory and doing so will have undefined results. The | ||
929 | * backing store is required to be page aligned and the size is | ||
930 | * padded to the next page boundary. The number of pages is: | ||
931 | * (bytesPerLine * size.height + PAGE_SIZE - 1) / PAGE_SIZE | ||
932 | * | ||
933 | * The pitch in the backingStore is required to be at least large | ||
934 | * enough to hold a 32bpp scanline. It is recommended that the | ||
935 | * driver pad bytesPerLine for a potential performance win. | ||
936 | * | ||
937 | * The cloneCount field is treated as a hint from the guest that | ||
938 | * the user wants this display to be cloned, cloneCount times. A | ||
939 | * value of zero means no cloning should happen. | ||
940 | */ | ||
941 | |||
942 | #define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */ | ||
943 | #define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */ | ||
944 | #define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */ | ||
945 | #define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */ | ||
946 | |||
947 | /* | ||
948 | * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is | ||
949 | * deactivated the base layer is defined to lose all contents and | ||
950 | * become black. When a screen is deactivated its backing store is | ||
951 | * optional; when this flag is set, backingPtr and bytesPerLine are ignored. | ||
832 | */ | 952 | */ |
953 | #define SVGA_SCREEN_DEACTIVATE (1 << 3) | ||
833 | 954 | ||
834 | #define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space | 955 | /* |
835 | #define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary' | 956 | * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set |
836 | #define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here | 957 | * the screen contents will be output as all black to the user |
958 | * though the base layer contents are preserved. The screen base | ||
959 | * layer can still be read and written as normal, though no visible | ||
960 | * effect will be seen by the user. When the flag is changed the | ||
961 | * screen will be blanked or redrawn to the current contents as needed | ||
962 | * without any extra commands from the driver. This flag only has an | ||
963 | * effect when the screen is not deactivated. | ||
964 | */ | ||
965 | #define SVGA_SCREEN_BLANKING (1 << 4) | ||
837 | 966 | ||
838 | typedef | 967 | typedef |
839 | struct SVGAScreenObject { | 968 | struct SVGAScreenObject { |
840 | uint32 structSize; // sizeof(SVGAScreenObject) | 969 | uint32 structSize; /* sizeof(SVGAScreenObject) */ |
841 | uint32 id; | 970 | uint32 id; |
842 | uint32 flags; | 971 | uint32 flags; |
843 | struct { | 972 | struct { |
@@ -847,7 +976,14 @@ struct SVGAScreenObject { | |||
847 | struct { | 976 | struct { |
848 | int32 x; | 977 | int32 x; |
849 | int32 y; | 978 | int32 y; |
850 | } root; // Only used if SVGA_SCREEN_HAS_ROOT is set. | 979 | } root; |
980 | |||
981 | /* | ||
982 | * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional | ||
983 | * with SVGA_FIFO_CAP_SCREEN_OBJECT. | ||
984 | */ | ||
985 | SVGAGuestImage backingStore; | ||
986 | uint32 cloneCount; | ||
851 | } SVGAScreenObject; | 987 | } SVGAScreenObject; |
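A sketch of filling in a Screen Object with the backing-store fields required by SVGA_FIFO_CAP_SCREEN_OBJECT_2. 'backingOffset' is a caller-chosen, page-aligned byte offset into the GFB; memset() is the usual string-header routine, and emission of the DEFINE_SCREEN command itself is omitted:

static void init_screen(SVGAScreenObject *screen, uint32 id,
                        uint32 width, uint32 height, uint32 backingOffset)
{
   memset(screen, 0, sizeof *screen);
   screen->structSize = sizeof *screen;
   screen->id = id;
   screen->flags = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
   screen->size.width = width;
   screen->size.height = height;
   screen->root.x = 0;
   screen->root.y = 0;

   /* Backing store lives in the GFB; pitch holds one 32bpp scanline. */
   screen->backingStore.ptr.gmrId = SVGA_GMR_FRAMEBUFFER;
   screen->backingStore.ptr.offset = backingOffset;
   screen->backingStore.pitch = width * 4;

   screen->cloneCount = 0;   /* no cloning requested */
}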
852 | 988 | ||
853 | 989 | ||
@@ -885,6 +1021,8 @@ typedef enum { | |||
885 | SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38, | 1021 | SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38, |
886 | SVGA_CMD_ANNOTATION_FILL = 39, | 1022 | SVGA_CMD_ANNOTATION_FILL = 39, |
887 | SVGA_CMD_ANNOTATION_COPY = 40, | 1023 | SVGA_CMD_ANNOTATION_COPY = 40, |
1024 | SVGA_CMD_DEFINE_GMR2 = 41, | ||
1025 | SVGA_CMD_REMAP_GMR2 = 42, | ||
888 | SVGA_CMD_MAX | 1026 | SVGA_CMD_MAX |
889 | } SVGAFifoCmdId; | 1027 | } SVGAFifoCmdId; |
890 | 1028 | ||
@@ -920,7 +1058,7 @@ typedef enum { | |||
920 | */ | 1058 | */ |
921 | 1059 | ||
922 | typedef | 1060 | typedef |
923 | struct { | 1061 | struct SVGAFifoCmdUpdate { |
924 | uint32 x; | 1062 | uint32 x; |
925 | uint32 y; | 1063 | uint32 y; |
926 | uint32 width; | 1064 | uint32 width; |
@@ -939,7 +1077,7 @@ struct { | |||
939 | */ | 1077 | */ |
940 | 1078 | ||
941 | typedef | 1079 | typedef |
942 | struct { | 1080 | struct SVGAFifoCmdRectCopy { |
943 | uint32 srcX; | 1081 | uint32 srcX; |
944 | uint32 srcY; | 1082 | uint32 srcY; |
945 | uint32 destX; | 1083 | uint32 destX; |
@@ -963,14 +1101,14 @@ struct { | |||
963 | */ | 1101 | */ |
964 | 1102 | ||
965 | typedef | 1103 | typedef |
966 | struct { | 1104 | struct SVGAFifoCmdDefineCursor { |
967 | uint32 id; // Reserved, must be zero. | 1105 | uint32 id; /* Reserved, must be zero. */ |
968 | uint32 hotspotX; | 1106 | uint32 hotspotX; |
969 | uint32 hotspotY; | 1107 | uint32 hotspotY; |
970 | uint32 width; | 1108 | uint32 width; |
971 | uint32 height; | 1109 | uint32 height; |
972 | uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL | 1110 | uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */ |
973 | uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL | 1111 | uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */ |
974 | /* | 1112 | /* |
975 | * Followed by scanline data for AND mask, then XOR mask. | 1113 | * Followed by scanline data for AND mask, then XOR mask. |
976 | * Each scanline is padded to a 32-bit boundary. | 1114 | * Each scanline is padded to a 32-bit boundary. |
@@ -992,8 +1130,8 @@ struct { | |||
992 | */ | 1130 | */ |
993 | 1131 | ||
994 | typedef | 1132 | typedef |
995 | struct { | 1133 | struct SVGAFifoCmdDefineAlphaCursor { |
996 | uint32 id; // Reserved, must be zero. | 1134 | uint32 id; /* Reserved, must be zero. */ |
997 | uint32 hotspotX; | 1135 | uint32 hotspotX; |
998 | uint32 hotspotY; | 1136 | uint32 hotspotY; |
999 | uint32 width; | 1137 | uint32 width; |
@@ -1015,7 +1153,7 @@ struct { | |||
1015 | */ | 1153 | */ |
1016 | 1154 | ||
1017 | typedef | 1155 | typedef |
1018 | struct { | 1156 | struct SVGAFifoCmdUpdateVerbose { |
1019 | uint32 x; | 1157 | uint32 x; |
1020 | uint32 y; | 1158 | uint32 y; |
1021 | uint32 width; | 1159 | uint32 width; |
@@ -1040,13 +1178,13 @@ struct { | |||
1040 | #define SVGA_ROP_COPY 0x03 | 1178 | #define SVGA_ROP_COPY 0x03 |
1041 | 1179 | ||
1042 | typedef | 1180 | typedef |
1043 | struct { | 1181 | struct SVGAFifoCmdFrontRopFill { |
1044 | uint32 color; // In the same format as the GFB | 1182 | uint32 color; /* In the same format as the GFB */ |
1045 | uint32 x; | 1183 | uint32 x; |
1046 | uint32 y; | 1184 | uint32 y; |
1047 | uint32 width; | 1185 | uint32 width; |
1048 | uint32 height; | 1186 | uint32 height; |
1049 | uint32 rop; // Must be SVGA_ROP_COPY | 1187 | uint32 rop; /* Must be SVGA_ROP_COPY */ |
1050 | } SVGAFifoCmdFrontRopFill; | 1188 | } SVGAFifoCmdFrontRopFill; |
1051 | 1189 | ||
1052 | 1190 | ||
@@ -1083,7 +1221,7 @@ struct { | |||
1083 | */ | 1221 | */ |
1084 | 1222 | ||
1085 | typedef | 1223 | typedef |
1086 | struct { | 1224 | struct SVGAFifoCmdEscape { |
1087 | uint32 nsid; | 1225 | uint32 nsid; |
1088 | uint32 size; | 1226 | uint32 size; |
1089 | /* followed by 'size' bytes of data */ | 1227 | /* followed by 'size' bytes of data */ |
@@ -1113,12 +1251,12 @@ struct { | |||
1113 | * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*). | 1251 | * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*). |
1114 | * | 1252 | * |
1115 | * Availability: | 1253 | * Availability: |
1116 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1254 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1117 | */ | 1255 | */ |
1118 | 1256 | ||
1119 | typedef | 1257 | typedef |
1120 | struct { | 1258 | struct { |
1121 | SVGAScreenObject screen; // Variable-length according to version | 1259 | SVGAScreenObject screen; /* Variable-length according to version */ |
1122 | } SVGAFifoCmdDefineScreen; | 1260 | } SVGAFifoCmdDefineScreen; |
1123 | 1261 | ||
1124 | 1262 | ||
@@ -1129,7 +1267,7 @@ struct { | |||
1129 | * re-use. | 1267 | * re-use. |
1130 | * | 1268 | * |
1131 | * Availability: | 1269 | * Availability: |
1132 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1270 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1133 | */ | 1271 | */ |
1134 | 1272 | ||
1135 | typedef | 1273 | typedef |
@@ -1182,7 +1320,7 @@ struct { | |||
1182 | * GMRFB. | 1320 | * GMRFB. |
1183 | * | 1321 | * |
1184 | * Availability: | 1322 | * Availability: |
1185 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1323 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1186 | */ | 1324 | */ |
1187 | 1325 | ||
1188 | typedef | 1326 | typedef |
@@ -1219,7 +1357,7 @@ struct { | |||
1219 | * SVGA_CMD_ANNOTATION_* commands for details. | 1357 | * SVGA_CMD_ANNOTATION_* commands for details. |
1220 | * | 1358 | * |
1221 | * Availability: | 1359 | * Availability: |
1222 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1360 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1223 | */ | 1361 | */ |
1224 | 1362 | ||
1225 | typedef | 1363 | typedef |
@@ -1267,7 +1405,7 @@ struct { | |||
1267 | * the time any subsequent FENCE commands are reached. | 1405 | * the time any subsequent FENCE commands are reached. |
1268 | * | 1406 | * |
1269 | * Availability: | 1407 | * Availability: |
1270 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1408 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1271 | */ | 1409 | */ |
1272 | 1410 | ||
1273 | typedef | 1411 | typedef |
@@ -1302,7 +1440,7 @@ struct { | |||
1302 | * user's display is being remoted over a network connection. | 1440 | * user's display is being remoted over a network connection. |
1303 | * | 1441 | * |
1304 | * Availability: | 1442 | * Availability: |
1305 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1443 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1306 | */ | 1444 | */ |
1307 | 1445 | ||
1308 | typedef | 1446 | typedef |
@@ -1334,7 +1472,7 @@ struct { | |||
1334 | * undefined. | 1472 | * undefined. |
1335 | * | 1473 | * |
1336 | * Availability: | 1474 | * Availability: |
1337 | * SVGA_FIFO_CAP_SCREEN_OBJECT | 1475 | * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 |
1338 | */ | 1476 | */ |
1339 | 1477 | ||
1340 | typedef | 1478 | typedef |
@@ -1343,4 +1481,72 @@ struct { | |||
1343 | uint32 srcScreenId; | 1481 | uint32 srcScreenId; |
1344 | } SVGAFifoCmdAnnotationCopy; | 1482 | } SVGAFifoCmdAnnotationCopy; |
1345 | 1483 | ||
1484 | |||
1485 | /* | ||
1486 | * SVGA_CMD_DEFINE_GMR2 -- | ||
1487 | * | ||
1488 | * Define guest memory region v2. See the description of GMRs above. | ||
1489 | * | ||
1490 | * Availability: | ||
1491 | * SVGA_CAP_GMR2 | ||
1492 | */ | ||
1493 | |||
1494 | typedef | ||
1495 | struct { | ||
1496 | uint32 gmrId; | ||
1497 | uint32 numPages; | ||
1498 | } SVGAFifoCmdDefineGMR2; | ||
1499 | |||
1500 | |||
1501 | /* | ||
1502 | * SVGA_CMD_REMAP_GMR2 -- | ||
1503 | * | ||
1504 | * Remap guest memory region v2. See the description of GMRs above. | ||
1505 | * | ||
1506 | * This command allows the guest to modify a portion of an existing GMR by | ||
1507 | * invalidating it or reassigning it to different guest physical pages. | ||
1508 | * The pages are identified by physical page number (PPN). The pages | ||
1509 | * are assumed to be pinned and valid for DMA operations. | ||
1510 | * | ||
1511 | * Description of command flags: | ||
1512 | * | ||
1513 | * SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR. | ||
1514 | * The PPN list must not overlap with the remap region (this can be | ||
1515 | * handled trivially by referencing a separate GMR). If the flag is | ||
1516 | * disabled, the PPN list is appended to the SVGAFifoCmdRemapGMR2 command. | ||
1517 | * | ||
1518 | * SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise | ||
1519 | * it is in PPN32 format. | ||
1520 | * | ||
1521 | * SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry. | ||
1522 | * A single PPN can be used to invalidate a portion of a GMR or | ||
1523 | * map it to a single guest scratch page. | ||
1524 | * | ||
1525 | * Availability: | ||
1526 | * SVGA_CAP_GMR2 | ||
1527 | */ | ||
1528 | |||
1529 | typedef enum { | ||
1530 | SVGA_REMAP_GMR2_PPN32 = 0, | ||
1531 | SVGA_REMAP_GMR2_VIA_GMR = (1 << 0), | ||
1532 | SVGA_REMAP_GMR2_PPN64 = (1 << 1), | ||
1533 | SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2), | ||
1534 | } SVGARemapGMR2Flags; | ||
1535 | |||
1536 | typedef | ||
1537 | struct { | ||
1538 | uint32 gmrId; | ||
1539 | SVGARemapGMR2Flags flags; | ||
1540 | uint32 offsetPages; /* offset in pages to begin remap */ | ||
1541 | uint32 numPages; /* number of pages to remap */ | ||
1542 | /* | ||
1543 | * Followed by additional data depending on SVGARemapGMR2Flags. | ||
1544 | * | ||
1545 | * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows. | ||
1546 | * Otherwise an array of page descriptors in PPN32 or PPN64 format | ||
1547 | * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag | ||
1548 | * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry. | ||
1549 | */ | ||
1550 | } SVGAFifoCmdRemapGMR2; | ||
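A hedged sketch of the two GMR2 commands together: define a region, then map its pages with an inline PPN32 list. fifo_reserve()/fifo_commit() are hypothetical stand-ins as before; 2D commands are prefixed by a bare uint32 command id rather than a SVGA3dCmdHeader:

extern void *fifo_reserve(uint32 bytes);   /* hypothetical */
extern void fifo_commit(void);             /* hypothetical */

static void define_and_map_gmr2(uint32 gmrId,
                                const uint32 *ppns, uint32 numPages)
{
   uint32 *cmd;
   SVGAFifoCmdDefineGMR2 *define;
   SVGAFifoCmdRemapGMR2 *remap;

   cmd = fifo_reserve(sizeof *cmd + sizeof *define);
   *cmd = SVGA_CMD_DEFINE_GMR2;
   define = (SVGAFifoCmdDefineGMR2 *) (cmd + 1);
   define->gmrId = gmrId;
   define->numPages = numPages;
   fifo_commit();

   cmd = fifo_reserve(sizeof *cmd + sizeof *remap +
                      numPages * sizeof(uint32));
   *cmd = SVGA_CMD_REMAP_GMR2;
   remap = (SVGAFifoCmdRemapGMR2 *) (cmd + 1);
   remap->gmrId = gmrId;
   remap->flags = SVGA_REMAP_GMR2_PPN32;   /* inline PPN32 list follows */
   remap->offsetPages = 0;
   remap->numPages = numPages;
   memcpy(remap + 1, ppns, numPages * sizeof(uint32));
   fifo_commit();
}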
1551 | |||
1346 | #endif | 1552 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 87e43e0733bf..5a72ed908232 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -42,6 +42,10 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | | |||
42 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | | 42 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | |
43 | TTM_PL_FLAG_CACHED; | 43 | TTM_PL_FLAG_CACHED; |
44 | 44 | ||
45 | static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | | ||
46 | TTM_PL_FLAG_CACHED | | ||
47 | TTM_PL_FLAG_NO_EVICT; | ||
48 | |||
45 | struct ttm_placement vmw_vram_placement = { | 49 | struct ttm_placement vmw_vram_placement = { |
46 | .fpfn = 0, | 50 | .fpfn = 0, |
47 | .lpfn = 0, | 51 | .lpfn = 0, |
@@ -56,6 +60,11 @@ static uint32_t vram_gmr_placement_flags[] = { | |||
56 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | 60 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
57 | }; | 61 | }; |
58 | 62 | ||
63 | static uint32_t gmr_vram_placement_flags[] = { | ||
64 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, | ||
65 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | ||
66 | }; | ||
67 | |||
59 | struct ttm_placement vmw_vram_gmr_placement = { | 68 | struct ttm_placement vmw_vram_gmr_placement = { |
60 | .fpfn = 0, | 69 | .fpfn = 0, |
61 | .lpfn = 0, | 70 | .lpfn = 0, |
@@ -65,6 +74,20 @@ struct ttm_placement vmw_vram_gmr_placement = { | |||
65 | .busy_placement = &gmr_placement_flags | 74 | .busy_placement = &gmr_placement_flags |
66 | }; | 75 | }; |
67 | 76 | ||
77 | static uint32_t vram_gmr_ne_placement_flags[] = { | ||
78 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT, | ||
79 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT | ||
80 | }; | ||
81 | |||
82 | struct ttm_placement vmw_vram_gmr_ne_placement = { | ||
83 | .fpfn = 0, | ||
84 | .lpfn = 0, | ||
85 | .num_placement = 2, | ||
86 | .placement = vram_gmr_ne_placement_flags, | ||
87 | .num_busy_placement = 1, | ||
88 | .busy_placement = &gmr_ne_placement_flags | ||
89 | }; | ||
90 | |||
68 | struct ttm_placement vmw_vram_sys_placement = { | 91 | struct ttm_placement vmw_vram_sys_placement = { |
69 | .fpfn = 0, | 92 | .fpfn = 0, |
70 | .lpfn = 0, | 93 | .lpfn = 0, |
@@ -92,6 +115,30 @@ struct ttm_placement vmw_sys_placement = { | |||
92 | .busy_placement = &sys_placement_flags | 115 | .busy_placement = &sys_placement_flags |
93 | }; | 116 | }; |
94 | 117 | ||
118 | static uint32_t evictable_placement_flags[] = { | ||
119 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, | ||
120 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, | ||
121 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | ||
122 | }; | ||
123 | |||
124 | struct ttm_placement vmw_evictable_placement = { | ||
125 | .fpfn = 0, | ||
126 | .lpfn = 0, | ||
127 | .num_placement = 3, | ||
128 | .placement = evictable_placement_flags, | ||
129 | .num_busy_placement = 1, | ||
130 | .busy_placement = &sys_placement_flags | ||
131 | }; | ||
132 | |||
133 | struct ttm_placement vmw_srf_placement = { | ||
134 | .fpfn = 0, | ||
135 | .lpfn = 0, | ||
136 | .num_placement = 1, | ||
137 | .num_busy_placement = 2, | ||
138 | .placement = &gmr_placement_flags, | ||
139 | .busy_placement = gmr_vram_placement_flags | ||
140 | }; | ||
141 | |||
95 | struct vmw_ttm_backend { | 142 | struct vmw_ttm_backend { |
96 | struct ttm_backend backend; | 143 | struct ttm_backend backend; |
97 | struct page **pages; | 144 | struct page **pages; |
@@ -274,39 +321,39 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
274 | 321 | ||
275 | static void *vmw_sync_obj_ref(void *sync_obj) | 322 | static void *vmw_sync_obj_ref(void *sync_obj) |
276 | { | 323 | { |
277 | return sync_obj; | 324 | |
325 | return (void *) | ||
326 | vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj); | ||
278 | } | 327 | } |
279 | 328 | ||
280 | static void vmw_sync_obj_unref(void **sync_obj) | 329 | static void vmw_sync_obj_unref(void **sync_obj) |
281 | { | 330 | { |
282 | *sync_obj = NULL; | 331 | vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); |
283 | } | 332 | } |
284 | 333 | ||
285 | static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) | 334 | static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) |
286 | { | 335 | { |
287 | struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; | 336 | vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); |
288 | |||
289 | mutex_lock(&dev_priv->hw_mutex); | ||
290 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | ||
291 | mutex_unlock(&dev_priv->hw_mutex); | ||
292 | return 0; | 337 | return 0; |
293 | } | 338 | } |
294 | 339 | ||
295 | static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) | 340 | static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) |
296 | { | 341 | { |
297 | struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; | 342 | unsigned long flags = (unsigned long) sync_arg; |
298 | uint32_t sequence = (unsigned long) sync_obj; | 343 | return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, |
344 | (uint32_t) flags); | ||
299 | 345 | ||
300 | return vmw_fence_signaled(dev_priv, sequence); | ||
301 | } | 346 | } |
302 | 347 | ||
303 | static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, | 348 | static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, |
304 | bool lazy, bool interruptible) | 349 | bool lazy, bool interruptible) |
305 | { | 350 | { |
306 | struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; | 351 | unsigned long flags = (unsigned long) sync_arg; |
307 | uint32_t sequence = (unsigned long) sync_obj; | ||
308 | 352 | ||
309 | return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ); | 353 | return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, |
354 | (uint32_t) flags, | ||
355 | lazy, interruptible, | ||
356 | VMW_FENCE_WAIT_TIMEOUT); | ||
310 | } | 357 | } |
311 | 358 | ||
312 | struct ttm_bo_driver vmw_bo_driver = { | 359 | struct ttm_bo_driver vmw_bo_driver = { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c new file mode 100644 index 000000000000..3fa884db08ab --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | |||
@@ -0,0 +1,322 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "ttm/ttm_placement.h" | ||
29 | |||
30 | #include "drmP.h" | ||
31 | #include "vmwgfx_drv.h" | ||
32 | |||
33 | |||
34 | /** | ||
35 | * vmw_dmabuf_to_placement - Validate a buffer to placement. | ||
36 | * | ||
37 | * @dev_priv: Driver private. | ||
38 | * @buf: DMA buffer to move. | ||
39 | * @placement: The TTM placement to validate the buffer to. | ||
40 | * @interruptible: Use interruptible wait. | ||
41 | * | ||
42 | * May only be called by the current master since it assumes that the | ||
43 | * master lock is the current master's lock. | ||
44 | * This function takes the master's lock in write mode. | ||
45 | * Flushes and unpins the query bo to avoid failures. | ||
46 | * | ||
47 | * Returns | ||
48 | * -ERESTARTSYS if interrupted by a signal. | ||
49 | */ | ||
50 | int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, | ||
51 | struct vmw_dma_buffer *buf, | ||
52 | struct ttm_placement *placement, | ||
53 | bool interruptible) | ||
54 | { | ||
55 | struct vmw_master *vmaster = dev_priv->active_master; | ||
56 | struct ttm_buffer_object *bo = &buf->base; | ||
57 | int ret; | ||
58 | |||
59 | ret = ttm_write_lock(&vmaster->lock, interruptible); | ||
60 | if (unlikely(ret != 0)) | ||
61 | return ret; | ||
62 | |||
63 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
64 | |||
65 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
66 | if (unlikely(ret != 0)) | ||
67 | goto err; | ||
68 | |||
69 | ret = ttm_bo_validate(bo, placement, interruptible, false, false); | ||
70 | |||
71 | ttm_bo_unreserve(bo); | ||
72 | |||
73 | err: | ||
74 | ttm_write_unlock(&vmaster->lock); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr. | ||
80 | * | ||
81 | * May only be called by the current master since it assumes that the | ||
82 | * master lock is the current master's lock. | ||
83 | * This function takes the master's lock in write mode. | ||
84 | * Flushes and unpins the query bo if @pin == true to avoid failures. | ||
85 | * | ||
86 | * @dev_priv: Driver private. | ||
87 | * @buf: DMA buffer to move. | ||
88 | * @pin: Pin buffer if true. | ||
89 | * @interruptible: Use interruptible wait. | ||
90 | * | ||
91 | * Returns | ||
92 | * -ERESTARTSYS if interrupted by a signal. | ||
93 | */ | ||
94 | int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, | ||
95 | struct vmw_dma_buffer *buf, | ||
96 | bool pin, bool interruptible) | ||
97 | { | ||
98 | struct vmw_master *vmaster = dev_priv->active_master; | ||
99 | struct ttm_buffer_object *bo = &buf->base; | ||
100 | struct ttm_placement *placement; | ||
101 | int ret; | ||
102 | |||
103 | ret = ttm_write_lock(&vmaster->lock, interruptible); | ||
104 | if (unlikely(ret != 0)) | ||
105 | return ret; | ||
106 | |||
107 | if (pin) | ||
108 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
109 | |||
110 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
111 | if (unlikely(ret != 0)) | ||
112 | goto err; | ||
113 | |||
114 | /** | ||
115 | * Put BO in VRAM if there is space, otherwise as a GMR. | ||
116 | * If there is no space in VRAM and GMR ids are all used up, | ||
117 | * start evicting GMRs to make room. If the DMA buffer can't be | ||
118 | * used as a GMR, this will return -ENOMEM. | ||
119 | */ | ||
120 | |||
121 | if (pin) | ||
122 | placement = &vmw_vram_gmr_ne_placement; | ||
123 | else | ||
124 | placement = &vmw_vram_gmr_placement; | ||
125 | |||
126 | ret = ttm_bo_validate(bo, placement, interruptible, false, false); | ||
127 | if (likely(ret == 0) || ret == -ERESTARTSYS) | ||
128 | goto err_unreserve; | ||
129 | |||
130 | |||
131 | /** | ||
132 | * If that failed, try VRAM again, this time evicting | ||
133 | * previous contents. | ||
134 | */ | ||
135 | |||
136 | if (pin) | ||
137 | placement = &vmw_vram_ne_placement; | ||
138 | else | ||
139 | placement = &vmw_vram_placement; | ||
140 | |||
141 | ret = ttm_bo_validate(bo, placement, interruptible, false, false); | ||
142 | |||
143 | err_unreserve: | ||
144 | ttm_bo_unreserve(bo); | ||
145 | err: | ||
146 | ttm_write_unlock(&vmaster->lock); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * vmw_dmabuf_to_vram - Move a buffer to vram. | ||
152 | * | ||
153 | * May only be called by the current master since it assumes that the | ||
154 | * master lock is the current master's lock. | ||
155 | * This function takes the master's lock in write mode. | ||
156 | * | ||
157 | * @dev_priv: Driver private. | ||
158 | * @buf: DMA buffer to move. | ||
159 | * @pin: Pin buffer in vram if true. | ||
160 | * @interruptible: Use interruptible wait. | ||
161 | * | ||
162 | * Returns | ||
163 | * -ERESTARTSYS if interrupted by a signal. | ||
164 | */ | ||
165 | int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, | ||
166 | struct vmw_dma_buffer *buf, | ||
167 | bool pin, bool interruptible) | ||
168 | { | ||
169 | struct ttm_placement *placement; | ||
170 | |||
171 | if (pin) | ||
172 | placement = &vmw_vram_ne_placement; | ||
173 | else | ||
174 | placement = &vmw_vram_placement; | ||
175 | |||
176 | return vmw_dmabuf_to_placement(dev_priv, buf, | ||
177 | placement, | ||
178 | interruptible); | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram. | ||
183 | * | ||
184 | * May only be called by the current master since it assumes that the | ||
185 | * master lock is the current master's lock. | ||
186 | * This function takes the master's lock in write mode. | ||
187 | * Flushes and unpins the query bo if @pin == true to avoid failures. | ||
188 | * | ||
189 | * @dev_priv: Driver private. | ||
190 | * @buf: DMA buffer to move. | ||
191 | * @pin: Pin buffer in vram if true. | ||
192 | * @interruptible: Use interruptible wait. | ||
193 | * | ||
194 | * Returns | ||
195 | * -ERESTARTSYS if interrupted by a signal. | ||
196 | */ | ||
197 | int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, | ||
198 | struct vmw_dma_buffer *buf, | ||
199 | bool pin, bool interruptible) | ||
200 | { | ||
201 | struct vmw_master *vmaster = dev_priv->active_master; | ||
202 | struct ttm_buffer_object *bo = &buf->base; | ||
203 | struct ttm_placement placement; | ||
204 | int ret = 0; | ||
205 | |||
206 | if (pin) | ||
207 | placement = vmw_vram_ne_placement; | ||
208 | else | ||
209 | placement = vmw_vram_placement; | ||
210 | placement.lpfn = bo->num_pages; | ||
211 | |||
212 | ret = ttm_write_lock(&vmaster->lock, interruptible); | ||
213 | if (unlikely(ret != 0)) | ||
214 | return ret; | ||
215 | |||
216 | if (pin) | ||
217 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
218 | |||
219 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
220 | if (unlikely(ret != 0)) | ||
221 | goto err_unlock; | ||
222 | |||
223 | /* Is this buffer already in vram but not at the start of it? */ | ||
224 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
225 | bo->mem.start < bo->num_pages && | ||
226 | bo->mem.start > 0) | ||
227 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
228 | false, false); | ||
229 | |||
230 | ret = ttm_bo_validate(bo, &placement, interruptible, false, false); | ||
231 | |||
232 | /* For some reason we didn't end up at the start of vram */ | ||
233 | WARN_ON(ret == 0 && bo->offset != 0); | ||
234 | |||
235 | ttm_bo_unreserve(bo); | ||
236 | err_unlock: | ||
237 | ttm_write_unlock(&vmaster->lock); | ||
238 | |||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | |||
243 | /** | ||
244 | * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer. | ||
245 | * | ||
246 | * May only be called by the current master since it assumes that the | ||
247 | * master lock is the current master's lock. | ||
248 | * This function takes the master's lock in write mode. | ||
249 | * | ||
250 | * @dev_priv: Driver private. | ||
251 | * @buf: DMA buffer to unpin. | ||
252 | * @interruptible: Use interruptible wait. | ||
253 | * | ||
254 | * Returns | ||
255 | * -ERESTARTSYS if interrupted by a signal. | ||
256 | */ | ||
257 | int vmw_dmabuf_unpin(struct vmw_private *dev_priv, | ||
258 | struct vmw_dma_buffer *buf, | ||
259 | bool interruptible) | ||
260 | { | ||
261 | /* | ||
262 | * We could in theory early out if the buffer is | ||
263 | * unpinned but we need to lock and reserve the buffer | ||
264 | * anyway, so we don't gain much by that. | ||
265 | */ | ||
266 | return vmw_dmabuf_to_placement(dev_priv, buf, | ||
267 | &vmw_evictable_placement, | ||
268 | interruptible); | ||
269 | } | ||
270 | |||
271 | |||
272 | /** | ||
273 | * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement | ||
274 | * of a buffer. | ||
275 | * | ||
276 | * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved. | ||
277 | * @ptr: SVGAGuestPtr returning the result. | ||
278 | */ | ||
279 | void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, | ||
280 | SVGAGuestPtr *ptr) | ||
281 | { | ||
282 | if (bo->mem.mem_type == TTM_PL_VRAM) { | ||
283 | ptr->gmrId = SVGA_GMR_FRAMEBUFFER; | ||
284 | ptr->offset = bo->offset; | ||
285 | } else { | ||
286 | ptr->gmrId = bo->mem.start; | ||
287 | ptr->offset = 0; | ||
288 | } | ||
289 | } | ||
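A usage sketch tying the helpers in this file together (illustrative only; the buffer is assumed to exist and error handling is trimmed):

static void example_pin_and_describe(struct vmw_private *dev_priv,
                                     struct vmw_dma_buffer *buf)
{
   SVGAGuestPtr ptr;

   /* Pin into VRAM or GMR memory, interruptibly. */
   if (vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, true) != 0)
      return;

   /* The buffer is pinned, so its placement can be read directly. */
   vmw_bo_get_guest_ptr(&buf->base, &ptr);
   /* ptr.gmrId is SVGA_GMR_FRAMEBUFFER for VRAM, or a GMR id otherwise. */
}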
290 | |||
291 | |||
292 | /** | ||
293 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. | ||
294 | * | ||
295 | * @bo: The buffer object. Must be reserved, and present either in VRAM | ||
296 | * or GMR memory. | ||
297 | * @pin: Whether to pin or unpin. | ||
298 | * | ||
299 | */ | ||
300 | void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) | ||
301 | { | ||
302 | uint32_t pl_flags; | ||
303 | struct ttm_placement placement; | ||
304 | uint32_t old_mem_type = bo->mem.mem_type; | ||
305 | int ret; | ||
306 | |||
307 | BUG_ON(!atomic_read(&bo->reserved)); | ||
308 | BUG_ON(old_mem_type != TTM_PL_VRAM && | ||
309 | old_mem_type != VMW_PL_FLAG_GMR); | ||
310 | |||
311 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; | ||
312 | if (pin) | ||
313 | pl_flags |= TTM_PL_FLAG_NO_EVICT; | ||
314 | |||
315 | memset(&placement, 0, sizeof(placement)); | ||
316 | placement.num_placement = 1; | ||
317 | placement.placement = &pl_flags; | ||
318 | |||
319 | ret = ttm_bo_validate(bo, &placement, false, true, true); | ||
320 | |||
321 | BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); | ||
322 | } | ||
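A minimal illustration of the reservation requirement above (sketch only; the helper name is assumed): vmw_bo_pin() demands a reserved buffer, so callers bracket it with TTM reservation.

static int example_pin_in_place(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_pin(bo, true);	/* adds TTM_PL_FLAG_NO_EVICT; never moves */
	ttm_bo_unreserve(bo);

	return 0;
}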
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 96949b93d920..13afddc1f034 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -82,16 +82,27 @@ | |||
82 | #define DRM_IOCTL_VMW_EXECBUF \ | 82 | #define DRM_IOCTL_VMW_EXECBUF \ |
83 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ | 83 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ |
84 | struct drm_vmw_execbuf_arg) | 84 | struct drm_vmw_execbuf_arg) |
85 | #define DRM_IOCTL_VMW_FIFO_DEBUG \ | 85 | #define DRM_IOCTL_VMW_GET_3D_CAP \ |
86 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \ | 86 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ |
87 | struct drm_vmw_fifo_debug_arg) | 87 | struct drm_vmw_get_3d_cap_arg) |
88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ | 88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ |
89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ | 89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ |
90 | struct drm_vmw_fence_wait_arg) | 90 | struct drm_vmw_fence_wait_arg) |
91 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | 91 | #define DRM_IOCTL_VMW_FENCE_SIGNALED \ |
92 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | 92 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ |
93 | struct drm_vmw_update_layout_arg) | 93 | struct drm_vmw_fence_signaled_arg) |
94 | 94 | #define DRM_IOCTL_VMW_FENCE_UNREF \ | |
95 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ | ||
96 | struct drm_vmw_fence_arg) | ||
97 | #define DRM_IOCTL_VMW_FENCE_EVENT \ | ||
98 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ | ||
99 | struct drm_vmw_fence_event_arg) | ||
100 | #define DRM_IOCTL_VMW_PRESENT \ | ||
101 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ | ||
102 | struct drm_vmw_present_arg) | ||
103 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ | ||
104 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ | ||
105 | struct drm_vmw_present_readback_arg) | ||
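A hedged userspace sketch of the new capability query, assuming libdrm's drmCommandWrite() and the drm_vmw_get_3d_cap_arg layout from vmwgfx_drm.h; the helper name and buffer sizing are illustrative only.

/* Hypothetical helper; needs <string.h>, <xf86drm.h> and vmwgfx_drm.h. */
static int example_query_3d_caps(int fd, void *buf, uint32_t size)
{
	struct drm_vmw_get_3d_cap_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.buffer = (uint64_t)(unsigned long)buf;	/* guest VA of buf */
	arg.max_size = size;

	return drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &arg, sizeof(arg));
}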
95 | 106 | ||
96 | /** | 107 | /** |
97 | * The core DRM version of this macro doesn't account for | 108 | * The core DRM version of this macro doesn't account for |
@@ -135,12 +146,25 @@ static struct drm_ioctl_desc vmw_ioctls[] = { | |||
135 | DRM_AUTH | DRM_UNLOCKED), | 146 | DRM_AUTH | DRM_UNLOCKED), |
136 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, | 147 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
137 | DRM_AUTH | DRM_UNLOCKED), | 148 | DRM_AUTH | DRM_UNLOCKED), |
138 | VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, | 149 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, |
139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), | 150 | DRM_AUTH | DRM_UNLOCKED), |
140 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl, | 151 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, |
152 | vmw_fence_obj_signaled_ioctl, | ||
153 | DRM_AUTH | DRM_UNLOCKED), | ||
154 | VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, | ||
155 | DRM_AUTH | DRM_UNLOCKED), | ||
156 | VMW_IOCTL_DEF(VMW_FENCE_EVENT, | ||
157 | vmw_fence_event_ioctl, | ||
158 | DRM_AUTH | DRM_UNLOCKED), | ||
159 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, | ||
141 | DRM_AUTH | DRM_UNLOCKED), | 160 | DRM_AUTH | DRM_UNLOCKED), |
142 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, | 161 | |
143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) | 162 | /* these allow direct access to the framebuffers mark as master only */ |
163 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, | ||
164 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | ||
165 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, | ||
166 | vmw_present_readback_ioctl, | ||
167 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | ||
144 | }; | 168 | }; |
145 | 169 | ||
146 | static struct pci_device_id vmw_pci_id_list[] = { | 170 | static struct pci_device_id vmw_pci_id_list[] = { |
@@ -189,8 +213,78 @@ static void vmw_print_capabilities(uint32_t capabilities) | |||
189 | DRM_INFO(" GMR.\n"); | 213 | DRM_INFO(" GMR.\n"); |
190 | if (capabilities & SVGA_CAP_TRACES) | 214 | if (capabilities & SVGA_CAP_TRACES) |
191 | DRM_INFO(" Traces.\n"); | 215 | DRM_INFO(" Traces.\n"); |
216 | if (capabilities & SVGA_CAP_GMR2) | ||
217 | DRM_INFO(" GMR2.\n"); | ||
218 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) | ||
219 | DRM_INFO(" Screen Object 2.\n"); | ||
220 | } | ||
221 | |||
222 | |||
223 | /** | ||
224 | * vmw_dummy_query_bo_prepare - Initialize a query result structure at | ||
225 | * the start of a buffer object. | ||
226 | * | ||
227 | * @dev_priv: The device private structure. | ||
228 | * | ||
229 | * This function will idle the buffer using an uninterruptible wait, then | ||
230 | * map the first page and initialize a pending occlusion query result structure. | ||
231 | * Finally, it will unmap the buffer. | ||
232 | * | ||
233 | * TODO: Since we're only mapping a single page, we should optimize the map | ||
234 | * to use kmap_atomic / iomap_atomic. | ||
235 | */ | ||
236 | static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | ||
237 | { | ||
238 | struct ttm_bo_kmap_obj map; | ||
239 | volatile SVGA3dQueryResult *result; | ||
240 | bool dummy; | ||
241 | int ret; | ||
242 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
243 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
244 | |||
245 | ttm_bo_reserve(bo, false, false, false, 0); | ||
246 | spin_lock(&bdev->fence_lock); | ||
247 | ret = ttm_bo_wait(bo, false, false, false); | ||
248 | spin_unlock(&bdev->fence_lock); | ||
249 | if (unlikely(ret != 0)) | ||
250 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, | ||
251 | 10*HZ); | ||
252 | |||
253 | ret = ttm_bo_kmap(bo, 0, 1, &map); | ||
254 | if (likely(ret == 0)) { | ||
255 | result = ttm_kmap_obj_virtual(&map, &dummy); | ||
256 | result->totalSize = sizeof(*result); | ||
257 | result->state = SVGA3D_QUERYSTATE_PENDING; | ||
258 | result->result32 = 0xff; | ||
259 | ttm_bo_kunmap(&map); | ||
260 | } else | ||
261 | DRM_ERROR("Dummy query buffer map failed.\n"); | ||
262 | ttm_bo_unreserve(bo); | ||
263 | } | ||
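The TODO above could look roughly like this; an untested sketch that assumes a populated system-memory TTM and ignores the VRAM/ioremap case (which would need the iomap variant instead):

/* Sketch of the TODO only; assumes bo->ttm is populated (not VRAM). */
static void *example_map_first_page_atomic(struct ttm_buffer_object *bo)
{
	return kmap_atomic(bo->ttm->pages[0]);
}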
264 | |||
265 | |||
266 | /** | ||
267 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result | ||
268 | * | ||
269 | * @dev_priv: A device private structure. | ||
270 | * | ||
271 | * This function creates a small buffer object that holds the query | ||
272 | * result for dummy queries emitted as query barriers. | ||
273 | * No interruptible waits are done within this function. | ||
274 | * | ||
275 | * Returns an error if bo creation fails. | ||
276 | */ | ||
277 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) | ||
278 | { | ||
279 | return ttm_bo_create(&dev_priv->bdev, | ||
280 | PAGE_SIZE, | ||
281 | ttm_bo_type_device, | ||
282 | &vmw_vram_sys_placement, | ||
283 | 0, 0, false, NULL, | ||
284 | &dev_priv->dummy_query_bo); | ||
192 | } | 285 | } |
193 | 286 | ||
287 | |||
194 | static int vmw_request_device(struct vmw_private *dev_priv) | 288 | static int vmw_request_device(struct vmw_private *dev_priv) |
195 | { | 289 | { |
196 | int ret; | 290 | int ret; |
@@ -200,16 +294,42 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
200 | DRM_ERROR("Unable to initialize FIFO.\n"); | 294 | DRM_ERROR("Unable to initialize FIFO.\n"); |
201 | return ret; | 295 | return ret; |
202 | } | 296 | } |
297 | vmw_fence_fifo_up(dev_priv->fman); | ||
298 | ret = vmw_dummy_query_bo_create(dev_priv); | ||
299 | if (unlikely(ret != 0)) | ||
300 | goto out_no_query_bo; | ||
301 | vmw_dummy_query_bo_prepare(dev_priv); | ||
203 | 302 | ||
204 | return 0; | 303 | return 0; |
304 | |||
305 | out_no_query_bo: | ||
306 | vmw_fence_fifo_down(dev_priv->fman); | ||
307 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | ||
308 | return ret; | ||
205 | } | 309 | } |
206 | 310 | ||
207 | static void vmw_release_device(struct vmw_private *dev_priv) | 311 | static void vmw_release_device(struct vmw_private *dev_priv) |
208 | { | 312 | { |
313 | /* | ||
314 | * Previous destructions should've released | ||
315 | * the pinned bo. | ||
316 | */ | ||
317 | |||
318 | BUG_ON(dev_priv->pinned_bo != NULL); | ||
319 | |||
320 | ttm_bo_unref(&dev_priv->dummy_query_bo); | ||
321 | vmw_fence_fifo_down(dev_priv->fman); | ||
209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 322 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
210 | } | 323 | } |
211 | 324 | ||
212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | 325 | /** |
326 | * Increase the 3d resource refcount. | ||
327 | * If the count was previously zero, initialize the fifo, switching to svga | ||
328 | * mode. Note that the master holds a ref as well, and may request an | ||
329 | * explicit switch to svga mode if fb is not running, using @unhide_svga. | ||
330 | */ | ||
331 | int vmw_3d_resource_inc(struct vmw_private *dev_priv, | ||
332 | bool unhide_svga) | ||
213 | { | 333 | { |
214 | int ret = 0; | 334 | int ret = 0; |
215 | 335 | ||
@@ -218,19 +338,42 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv) | |||
218 | ret = vmw_request_device(dev_priv); | 338 | ret = vmw_request_device(dev_priv); |
219 | if (unlikely(ret != 0)) | 339 | if (unlikely(ret != 0)) |
220 | --dev_priv->num_3d_resources; | 340 | --dev_priv->num_3d_resources; |
341 | } else if (unhide_svga) { | ||
342 | mutex_lock(&dev_priv->hw_mutex); | ||
343 | vmw_write(dev_priv, SVGA_REG_ENABLE, | ||
344 | vmw_read(dev_priv, SVGA_REG_ENABLE) & | ||
345 | ~SVGA_REG_ENABLE_HIDE); | ||
346 | mutex_unlock(&dev_priv->hw_mutex); | ||
221 | } | 347 | } |
348 | |||
222 | mutex_unlock(&dev_priv->release_mutex); | 349 | mutex_unlock(&dev_priv->release_mutex); |
223 | return ret; | 350 | return ret; |
224 | } | 351 | } |
225 | 352 | ||
226 | 353 | /** | |
227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) | 354 | * Decrease the 3d resource refcount. |
355 | * If the count reaches zero, disable the fifo, switching to vga mode. | ||
356 | * Note that the master holds a refcount as well, and may request an | ||
357 | * explicit switch to vga mode when it releases its refcount to account | ||
358 | * for the situation of an X server vt switch to VGA with 3d resources | ||
359 | * active. | ||
360 | */ | ||
361 | void vmw_3d_resource_dec(struct vmw_private *dev_priv, | ||
362 | bool hide_svga) | ||
228 | { | 363 | { |
229 | int32_t n3d; | 364 | int32_t n3d; |
230 | 365 | ||
231 | mutex_lock(&dev_priv->release_mutex); | 366 | mutex_lock(&dev_priv->release_mutex); |
232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | 367 | if (unlikely(--dev_priv->num_3d_resources == 0)) |
233 | vmw_release_device(dev_priv); | 368 | vmw_release_device(dev_priv); |
369 | else if (hide_svga) { | ||
370 | mutex_lock(&dev_priv->hw_mutex); | ||
371 | vmw_write(dev_priv, SVGA_REG_ENABLE, | ||
372 | vmw_read(dev_priv, SVGA_REG_ENABLE) | | ||
373 | SVGA_REG_ENABLE_HIDE); | ||
374 | mutex_unlock(&dev_priv->hw_mutex); | ||
375 | } | ||
376 | |||
234 | n3d = (int32_t) dev_priv->num_3d_resources; | 377 | n3d = (int32_t) dev_priv->num_3d_resources; |
235 | mutex_unlock(&dev_priv->release_mutex); | 378 | mutex_unlock(&dev_priv->release_mutex); |
236 | 379 | ||
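The intended pairing, sketched below (the function name is illustrative): take a reference before fifo work, drop it afterwards, and let the first/last user drive the unhide/hide transitions.

static int example_do_3d_work(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_3d_resource_inc(dev_priv, true);	/* unhide if first user */
	if (unlikely(ret != 0))
		return ret;

	/* ... emit fifo commands here ... */

	vmw_3d_resource_dec(dev_priv, true);		/* hide if last user */
	return 0;
}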
@@ -252,7 +395,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
252 | 395 | ||
253 | dev_priv->dev = dev; | 396 | dev_priv->dev = dev; |
254 | dev_priv->vmw_chipset = chipset; | 397 | dev_priv->vmw_chipset = chipset; |
255 | dev_priv->last_read_sequence = (uint32_t) -100; | 398 | dev_priv->last_read_seqno = (uint32_t) -100; |
256 | mutex_init(&dev_priv->hw_mutex); | 399 | mutex_init(&dev_priv->hw_mutex); |
257 | mutex_init(&dev_priv->cmdbuf_mutex); | 400 | mutex_init(&dev_priv->cmdbuf_mutex); |
258 | mutex_init(&dev_priv->release_mutex); | 401 | mutex_init(&dev_priv->release_mutex); |
@@ -263,8 +406,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
263 | mutex_init(&dev_priv->init_mutex); | 406 | mutex_init(&dev_priv->init_mutex); |
264 | init_waitqueue_head(&dev_priv->fence_queue); | 407 | init_waitqueue_head(&dev_priv->fence_queue); |
265 | init_waitqueue_head(&dev_priv->fifo_queue); | 408 | init_waitqueue_head(&dev_priv->fifo_queue); |
266 | atomic_set(&dev_priv->fence_queue_waiters, 0); | 409 | dev_priv->fence_queue_waiters = 0; |
267 | atomic_set(&dev_priv->fifo_queue_waiters, 0); | 410 | atomic_set(&dev_priv->fifo_queue_waiters, 0); |
411 | INIT_LIST_HEAD(&dev_priv->surface_lru); | ||
412 | dev_priv->used_memory_size = 0; | ||
268 | 413 | ||
269 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | 414 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); |
270 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 415 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
@@ -285,6 +430,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
285 | 430 | ||
286 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); | 431 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
287 | 432 | ||
433 | dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); | ||
434 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | ||
435 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | ||
436 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | ||
288 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 437 | if (dev_priv->capabilities & SVGA_CAP_GMR) { |
289 | dev_priv->max_gmr_descriptors = | 438 | dev_priv->max_gmr_descriptors = |
290 | vmw_read(dev_priv, | 439 | vmw_read(dev_priv, |
@@ -292,11 +441,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
292 | dev_priv->max_gmr_ids = | 441 | dev_priv->max_gmr_ids = |
293 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); | 442 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); |
294 | } | 443 | } |
295 | 444 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | |
296 | dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); | 445 | dev_priv->max_gmr_pages = |
297 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | 446 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
298 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | 447 | dev_priv->memory_size = |
299 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | 448 | vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); |
449 | dev_priv->memory_size -= dev_priv->vram_size; | ||
450 | } else { | ||
451 | /* | ||
452 | * An arbitrary limit of 512MiB on surface | ||
453 | * memory. But all HWV8 hardware supports GMR2. | ||
454 | */ | ||
455 | dev_priv->memory_size = 512*1024*1024; | ||
456 | } | ||
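A worked example with illustrative numbers: if SVGA_REG_MEMORY_SIZE reports 1 GiB and SVGA_REG_VRAM_SIZE reports 128 MiB, the surface budget becomes

	memory_size = 1024 MiB - 128 MiB = 896 MiB

of hypervisor memory accountable to surfaces.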
300 | 457 | ||
301 | mutex_unlock(&dev_priv->hw_mutex); | 458 | mutex_unlock(&dev_priv->hw_mutex); |
302 | 459 | ||
@@ -308,6 +465,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
308 | DRM_INFO("Max GMR descriptors is %u\n", | 465 | DRM_INFO("Max GMR descriptors is %u\n", |
309 | (unsigned)dev_priv->max_gmr_descriptors); | 466 | (unsigned)dev_priv->max_gmr_descriptors); |
310 | } | 467 | } |
468 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
469 | DRM_INFO("Max number of GMR pages is %u\n", | ||
470 | (unsigned)dev_priv->max_gmr_pages); | ||
471 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", | ||
472 | (unsigned)dev_priv->memory_size / 1024); | ||
473 | } | ||
311 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", | 474 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
312 | dev_priv->vram_start, dev_priv->vram_size / 1024); | 475 | dev_priv->vram_start, dev_priv->vram_size / 1024); |
313 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | 476 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
@@ -394,22 +557,34 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
394 | goto out_no_device; | 557 | goto out_no_device; |
395 | } | 558 | } |
396 | } | 559 | } |
560 | |||
561 | dev_priv->fman = vmw_fence_manager_init(dev_priv); | ||
562 | if (unlikely(dev_priv->fman == NULL)) | ||
563 | goto out_no_fman; | ||
564 | |||
565 | /* Need to start the fifo to check if we can do screen objects */ | ||
566 | ret = vmw_3d_resource_inc(dev_priv, true); | ||
567 | if (unlikely(ret != 0)) | ||
568 | goto out_no_fifo; | ||
569 | vmw_kms_save_vga(dev_priv); | ||
570 | |||
571 | /* Start kms and overlay systems, needs fifo. */ | ||
397 | ret = vmw_kms_init(dev_priv); | 572 | ret = vmw_kms_init(dev_priv); |
398 | if (unlikely(ret != 0)) | 573 | if (unlikely(ret != 0)) |
399 | goto out_no_kms; | 574 | goto out_no_kms; |
400 | vmw_overlay_init(dev_priv); | 575 | vmw_overlay_init(dev_priv); |
576 | |||
577 | /* 3D Depends on Screen Objects being used. */ | ||
578 | DRM_INFO("Detected %sdevice 3D availability.\n", | ||
579 | vmw_fifo_have_3d(dev_priv) ? | ||
580 | "" : "no "); | ||
581 | |||
582 | /* We might be done with the fifo now */ | ||
401 | if (dev_priv->enable_fb) { | 583 | if (dev_priv->enable_fb) { |
402 | ret = vmw_3d_resource_inc(dev_priv); | ||
403 | if (unlikely(ret != 0)) | ||
404 | goto out_no_fifo; | ||
405 | vmw_kms_save_vga(dev_priv); | ||
406 | vmw_fb_init(dev_priv); | 584 | vmw_fb_init(dev_priv); |
407 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
408 | "Detected device 3D availability.\n" : | ||
409 | "Detected no device 3D availability.\n"); | ||
410 | } else { | 585 | } else { |
411 | DRM_INFO("Delayed 3D detection since we're not " | 586 | vmw_kms_restore_vga(dev_priv); |
412 | "running the device in SVGA mode yet.\n"); | 587 | vmw_3d_resource_dec(dev_priv, true); |
413 | } | 588 | } |
414 | 589 | ||
415 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | 590 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { |
@@ -426,15 +601,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
426 | return 0; | 601 | return 0; |
427 | 602 | ||
428 | out_no_irq: | 603 | out_no_irq: |
429 | if (dev_priv->enable_fb) { | 604 | if (dev_priv->enable_fb) |
430 | vmw_fb_close(dev_priv); | 605 | vmw_fb_close(dev_priv); |
431 | vmw_kms_restore_vga(dev_priv); | ||
432 | vmw_3d_resource_dec(dev_priv); | ||
433 | } | ||
434 | out_no_fifo: | ||
435 | vmw_overlay_close(dev_priv); | 606 | vmw_overlay_close(dev_priv); |
436 | vmw_kms_close(dev_priv); | 607 | vmw_kms_close(dev_priv); |
437 | out_no_kms: | 608 | out_no_kms: |
609 | /* We still have a 3D resource reference held */ | ||
610 | if (dev_priv->enable_fb) { | ||
611 | vmw_kms_restore_vga(dev_priv); | ||
612 | vmw_3d_resource_dec(dev_priv, false); | ||
613 | } | ||
614 | out_no_fifo: | ||
615 | vmw_fence_manager_takedown(dev_priv->fman); | ||
616 | out_no_fman: | ||
438 | if (dev_priv->stealth) | 617 | if (dev_priv->stealth) |
439 | pci_release_region(dev->pdev, 2); | 618 | pci_release_region(dev->pdev, 2); |
440 | else | 619 | else |
@@ -467,15 +646,18 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
467 | 646 | ||
468 | unregister_pm_notifier(&dev_priv->pm_nb); | 647 | unregister_pm_notifier(&dev_priv->pm_nb); |
469 | 648 | ||
649 | if (dev_priv->ctx.cmd_bounce) | ||
650 | vfree(dev_priv->ctx.cmd_bounce); | ||
470 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 651 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
471 | drm_irq_uninstall(dev_priv->dev); | 652 | drm_irq_uninstall(dev_priv->dev); |
472 | if (dev_priv->enable_fb) { | 653 | if (dev_priv->enable_fb) { |
473 | vmw_fb_close(dev_priv); | 654 | vmw_fb_close(dev_priv); |
474 | vmw_kms_restore_vga(dev_priv); | 655 | vmw_kms_restore_vga(dev_priv); |
475 | vmw_3d_resource_dec(dev_priv); | 656 | vmw_3d_resource_dec(dev_priv, false); |
476 | } | 657 | } |
477 | vmw_kms_close(dev_priv); | 658 | vmw_kms_close(dev_priv); |
478 | vmw_overlay_close(dev_priv); | 659 | vmw_overlay_close(dev_priv); |
660 | vmw_fence_manager_takedown(dev_priv->fman); | ||
479 | if (dev_priv->stealth) | 661 | if (dev_priv->stealth) |
480 | pci_release_region(dev->pdev, 2); | 662 | pci_release_region(dev->pdev, 2); |
481 | else | 663 | else |
@@ -646,7 +828,7 @@ static int vmw_master_set(struct drm_device *dev, | |||
646 | int ret = 0; | 828 | int ret = 0; |
647 | 829 | ||
648 | if (!dev_priv->enable_fb) { | 830 | if (!dev_priv->enable_fb) { |
649 | ret = vmw_3d_resource_inc(dev_priv); | 831 | ret = vmw_3d_resource_inc(dev_priv, true); |
650 | if (unlikely(ret != 0)) | 832 | if (unlikely(ret != 0)) |
651 | return ret; | 833 | return ret; |
652 | vmw_kms_save_vga(dev_priv); | 834 | vmw_kms_save_vga(dev_priv); |
@@ -688,7 +870,7 @@ out_no_active_lock: | |||
688 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 870 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
689 | mutex_unlock(&dev_priv->hw_mutex); | 871 | mutex_unlock(&dev_priv->hw_mutex); |
690 | vmw_kms_restore_vga(dev_priv); | 872 | vmw_kms_restore_vga(dev_priv); |
691 | vmw_3d_resource_dec(dev_priv); | 873 | vmw_3d_resource_dec(dev_priv, true); |
692 | } | 874 | } |
693 | return ret; | 875 | return ret; |
694 | } | 876 | } |
@@ -709,7 +891,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
709 | 891 | ||
710 | vmw_fp->locked_master = drm_master_get(file_priv->master); | 892 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
711 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | 893 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
712 | vmw_kms_idle_workqueues(vmaster); | 894 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); |
713 | 895 | ||
714 | if (unlikely((ret != 0))) { | 896 | if (unlikely((ret != 0))) { |
715 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | 897 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
@@ -726,7 +908,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
726 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 908 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
727 | mutex_unlock(&dev_priv->hw_mutex); | 909 | mutex_unlock(&dev_priv->hw_mutex); |
728 | vmw_kms_restore_vga(dev_priv); | 910 | vmw_kms_restore_vga(dev_priv); |
729 | vmw_3d_resource_dec(dev_priv); | 911 | vmw_3d_resource_dec(dev_priv, true); |
730 | } | 912 | } |
731 | 913 | ||
732 | dev_priv->active_master = &dev_priv->fbdev_master; | 914 | dev_priv->active_master = &dev_priv->fbdev_master; |
@@ -761,6 +943,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
761 | * This empties VRAM and unbinds all GMR bindings. | 943 | * This empties VRAM and unbinds all GMR bindings. |
762 | * Buffer contents is moved to swappable memory. | 944 | * Buffer contents is moved to swappable memory. |
763 | */ | 945 | */ |
946 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
764 | ttm_bo_swapout_all(&dev_priv->bdev); | 947 | ttm_bo_swapout_all(&dev_priv->bdev); |
765 | 948 | ||
766 | break; | 949 | break; |
@@ -835,7 +1018,7 @@ static int vmw_pm_prepare(struct device *kdev) | |||
835 | */ | 1018 | */ |
836 | dev_priv->suspended = true; | 1019 | dev_priv->suspended = true; |
837 | if (dev_priv->enable_fb) | 1020 | if (dev_priv->enable_fb) |
838 | vmw_3d_resource_dec(dev_priv); | 1021 | vmw_3d_resource_dec(dev_priv, true); |
839 | 1022 | ||
840 | if (dev_priv->num_3d_resources != 0) { | 1023 | if (dev_priv->num_3d_resources != 0) { |
841 | 1024 | ||
@@ -843,7 +1026,7 @@ static int vmw_pm_prepare(struct device *kdev) | |||
843 | "while 3D resources are active.\n"); | 1026 | "while 3D resources are active.\n"); |
844 | 1027 | ||
845 | if (dev_priv->enable_fb) | 1028 | if (dev_priv->enable_fb) |
846 | vmw_3d_resource_inc(dev_priv); | 1029 | vmw_3d_resource_inc(dev_priv, true); |
847 | dev_priv->suspended = false; | 1030 | dev_priv->suspended = false; |
848 | return -EBUSY; | 1031 | return -EBUSY; |
849 | } | 1032 | } |
@@ -862,7 +1045,7 @@ static void vmw_pm_complete(struct device *kdev) | |||
862 | * start fifo. | 1045 | * start fifo. |
863 | */ | 1046 | */ |
864 | if (dev_priv->enable_fb) | 1047 | if (dev_priv->enable_fb) |
865 | vmw_3d_resource_inc(dev_priv); | 1048 | vmw_3d_resource_inc(dev_priv, false); |
866 | 1049 | ||
867 | dev_priv->suspended = false; | 1050 | dev_priv->suspended = false; |
868 | } | 1051 | } |
@@ -886,6 +1069,8 @@ static struct drm_driver driver = { | |||
886 | .irq_uninstall = vmw_irq_uninstall, | 1069 | .irq_uninstall = vmw_irq_uninstall, |
887 | .irq_handler = vmw_irq_handler, | 1070 | .irq_handler = vmw_irq_handler, |
888 | .get_vblank_counter = vmw_get_vblank_counter, | 1071 | .get_vblank_counter = vmw_get_vblank_counter, |
1072 | .enable_vblank = vmw_enable_vblank, | ||
1073 | .disable_vblank = vmw_disable_vblank, | ||
889 | .reclaim_buffers_locked = NULL, | 1074 | .reclaim_buffers_locked = NULL, |
890 | .ioctls = vmw_ioctls, | 1075 | .ioctls = vmw_ioctls, |
891 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), | 1076 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), |
@@ -902,7 +1087,8 @@ static struct drm_driver driver = { | |||
902 | .release = drm_release, | 1087 | .release = drm_release, |
903 | .unlocked_ioctl = vmw_unlocked_ioctl, | 1088 | .unlocked_ioctl = vmw_unlocked_ioctl, |
904 | .mmap = vmw_mmap, | 1089 | .mmap = vmw_mmap, |
905 | .poll = drm_poll, | 1090 | .poll = vmw_fops_poll, |
1091 | .read = vmw_fops_read, | ||
906 | .fasync = drm_fasync, | 1092 | .fasync = drm_fasync, |
907 | #if defined(CONFIG_COMPAT) | 1093 | #if defined(CONFIG_COMPAT) |
908 | .compat_ioctl = drm_compat_ioctl, | 1094 | .compat_ioctl = drm_compat_ioctl, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 10fc01f69c40..30589d0aecd9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -38,20 +38,27 @@ | |||
38 | #include "ttm/ttm_lock.h" | 38 | #include "ttm/ttm_lock.h" |
39 | #include "ttm/ttm_execbuf_util.h" | 39 | #include "ttm/ttm_execbuf_util.h" |
40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
41 | #include "vmwgfx_fence.h" | ||
41 | 42 | ||
42 | #define VMWGFX_DRIVER_DATE "20100927" | 43 | #define VMWGFX_DRIVER_DATE "20111008" |
43 | #define VMWGFX_DRIVER_MAJOR 1 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
44 | #define VMWGFX_DRIVER_MINOR 4 | 45 | #define VMWGFX_DRIVER_MINOR 2 |
45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
48 | #define VMWGFX_MAX_RELOCATIONS 2048 | 49 | #define VMWGFX_MAX_RELOCATIONS 2048 |
49 | #define VMWGFX_MAX_GMRS 2048 | 50 | #define VMWGFX_MAX_VALIDATIONS 2048 |
50 | #define VMWGFX_MAX_DISPLAYS 16 | 51 | #define VMWGFX_MAX_DISPLAYS 16 |
52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 | ||
51 | 53 | ||
52 | #define VMW_PL_GMR TTM_PL_PRIV0 | 54 | #define VMW_PL_GMR TTM_PL_PRIV0 |
53 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 | 55 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 |
54 | 56 | ||
57 | #define VMW_RES_CONTEXT ttm_driver_type0 | ||
58 | #define VMW_RES_SURFACE ttm_driver_type1 | ||
59 | #define VMW_RES_STREAM ttm_driver_type2 | ||
60 | #define VMW_RES_FENCE ttm_driver_type3 | ||
61 | |||
55 | struct vmw_fpriv { | 62 | struct vmw_fpriv { |
56 | struct drm_master *locked_master; | 63 | struct drm_master *locked_master; |
57 | struct ttm_object_file *tfile; | 64 | struct ttm_object_file *tfile; |
@@ -72,9 +79,11 @@ struct vmw_resource { | |||
72 | int id; | 79 | int id; |
73 | enum ttm_object_type res_type; | 80 | enum ttm_object_type res_type; |
74 | bool avail; | 81 | bool avail; |
82 | void (*remove_from_lists) (struct vmw_resource *res); | ||
75 | void (*hw_destroy) (struct vmw_resource *res); | 83 | void (*hw_destroy) (struct vmw_resource *res); |
76 | void (*res_free) (struct vmw_resource *res); | 84 | void (*res_free) (struct vmw_resource *res); |
77 | 85 | struct list_head validate_head; | |
86 | struct list_head query_head; /* Protected by the cmdbuf mutex */ | ||
78 | /* TODO is a generic snooper needed? */ | 87 | /* TODO is a generic snooper needed? */ |
79 | #if 0 | 88 | #if 0 |
80 | void (*snoop)(struct vmw_resource *res, | 89 | void (*snoop)(struct vmw_resource *res, |
@@ -90,8 +99,12 @@ struct vmw_cursor_snooper { | |||
90 | uint32_t *image; | 99 | uint32_t *image; |
91 | }; | 100 | }; |
92 | 101 | ||
102 | struct vmw_framebuffer; | ||
103 | struct vmw_surface_offset; | ||
104 | |||
93 | struct vmw_surface { | 105 | struct vmw_surface { |
94 | struct vmw_resource res; | 106 | struct vmw_resource res; |
107 | struct list_head lru_head; /* Protected by the resource lock */ | ||
95 | uint32_t flags; | 108 | uint32_t flags; |
96 | uint32_t format; | 109 | uint32_t format; |
97 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; | 110 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; |
@@ -102,9 +115,12 @@ struct vmw_surface { | |||
102 | 115 | ||
103 | /* TODO so far just a extra pointer */ | 116 | /* TODO so far just a extra pointer */ |
104 | struct vmw_cursor_snooper snooper; | 117 | struct vmw_cursor_snooper snooper; |
118 | struct ttm_buffer_object *backup; | ||
119 | struct vmw_surface_offset *offsets; | ||
120 | uint32_t backup_size; | ||
105 | }; | 121 | }; |
106 | 122 | ||
107 | struct vmw_fence_queue { | 123 | struct vmw_marker_queue { |
108 | struct list_head head; | 124 | struct list_head head; |
109 | struct timespec lag; | 125 | struct timespec lag; |
110 | struct timespec lag_time; | 126 | struct timespec lag_time; |
@@ -115,16 +131,12 @@ struct vmw_fifo_state { | |||
115 | unsigned long reserved_size; | 131 | unsigned long reserved_size; |
116 | __le32 *dynamic_buffer; | 132 | __le32 *dynamic_buffer; |
117 | __le32 *static_buffer; | 133 | __le32 *static_buffer; |
118 | __le32 *last_buffer; | ||
119 | uint32_t last_data_size; | ||
120 | uint32_t last_buffer_size; | ||
121 | bool last_buffer_add; | ||
122 | unsigned long static_buffer_size; | 134 | unsigned long static_buffer_size; |
123 | bool using_bounce_buffer; | 135 | bool using_bounce_buffer; |
124 | uint32_t capabilities; | 136 | uint32_t capabilities; |
125 | struct mutex fifo_mutex; | 137 | struct mutex fifo_mutex; |
126 | struct rw_semaphore rwsem; | 138 | struct rw_semaphore rwsem; |
127 | struct vmw_fence_queue fence_queue; | 139 | struct vmw_marker_queue marker_queue; |
128 | }; | 140 | }; |
129 | 141 | ||
130 | struct vmw_relocation { | 142 | struct vmw_relocation { |
@@ -136,6 +148,8 @@ struct vmw_sw_context{ | |||
136 | struct ida bo_list; | 148 | struct ida bo_list; |
137 | uint32_t last_cid; | 149 | uint32_t last_cid; |
138 | bool cid_valid; | 150 | bool cid_valid; |
151 | bool kernel; /**< is the call made from the kernel */ | ||
152 | struct vmw_resource *cur_ctx; | ||
139 | uint32_t last_sid; | 153 | uint32_t last_sid; |
140 | uint32_t sid_translation; | 154 | uint32_t sid_translation; |
141 | bool sid_valid; | 155 | bool sid_valid; |
@@ -143,8 +157,16 @@ struct vmw_sw_context{ | |||
143 | struct list_head validate_nodes; | 157 | struct list_head validate_nodes; |
144 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; | 158 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; |
145 | uint32_t cur_reloc; | 159 | uint32_t cur_reloc; |
146 | struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS]; | 160 | struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; |
147 | uint32_t cur_val_buf; | 161 | uint32_t cur_val_buf; |
162 | uint32_t *cmd_bounce; | ||
163 | uint32_t cmd_bounce_size; | ||
164 | struct list_head resource_list; | ||
165 | uint32_t fence_flags; | ||
166 | struct list_head query_list; | ||
167 | struct ttm_buffer_object *cur_query_bo; | ||
168 | uint32_t cur_query_cid; | ||
169 | bool query_cid_valid; | ||
148 | }; | 170 | }; |
149 | 171 | ||
150 | struct vmw_legacy_display; | 172 | struct vmw_legacy_display; |
@@ -185,6 +207,8 @@ struct vmw_private { | |||
185 | uint32_t capabilities; | 207 | uint32_t capabilities; |
186 | uint32_t max_gmr_descriptors; | 208 | uint32_t max_gmr_descriptors; |
187 | uint32_t max_gmr_ids; | 209 | uint32_t max_gmr_ids; |
210 | uint32_t max_gmr_pages; | ||
211 | uint32_t memory_size; | ||
188 | bool has_gmr; | 212 | bool has_gmr; |
189 | struct mutex hw_mutex; | 213 | struct mutex hw_mutex; |
190 | 214 | ||
@@ -195,12 +219,7 @@ struct vmw_private { | |||
195 | struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; | 219 | struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; |
196 | uint32_t vga_width; | 220 | uint32_t vga_width; |
197 | uint32_t vga_height; | 221 | uint32_t vga_height; |
198 | uint32_t vga_depth; | ||
199 | uint32_t vga_bpp; | 222 | uint32_t vga_bpp; |
200 | uint32_t vga_pseudo; | ||
201 | uint32_t vga_red_mask; | ||
202 | uint32_t vga_green_mask; | ||
203 | uint32_t vga_blue_mask; | ||
204 | uint32_t vga_bpl; | 223 | uint32_t vga_bpl; |
205 | uint32_t vga_pitchlock; | 224 | uint32_t vga_pitchlock; |
206 | 225 | ||
@@ -212,6 +231,7 @@ struct vmw_private { | |||
212 | 231 | ||
213 | void *fb_info; | 232 | void *fb_info; |
214 | struct vmw_legacy_display *ldu_priv; | 233 | struct vmw_legacy_display *ldu_priv; |
234 | struct vmw_screen_object_display *sou_priv; | ||
215 | struct vmw_overlay *overlay_priv; | 235 | struct vmw_overlay *overlay_priv; |
216 | 236 | ||
217 | /* | 237 | /* |
@@ -240,13 +260,16 @@ struct vmw_private { | |||
240 | * Fencing and IRQs. | 260 | * Fencing and IRQs. |
241 | */ | 261 | */ |
242 | 262 | ||
243 | atomic_t fence_seq; | 263 | atomic_t marker_seq; |
244 | wait_queue_head_t fence_queue; | 264 | wait_queue_head_t fence_queue; |
245 | wait_queue_head_t fifo_queue; | 265 | wait_queue_head_t fifo_queue; |
246 | atomic_t fence_queue_waiters; | 266 | int fence_queue_waiters; /* Protected by hw_mutex */ |
267 | int goal_queue_waiters; /* Protected by hw_mutex */ | ||
247 | atomic_t fifo_queue_waiters; | 268 | atomic_t fifo_queue_waiters; |
248 | uint32_t last_read_sequence; | 269 | uint32_t last_read_seqno; |
249 | spinlock_t irq_lock; | 270 | spinlock_t irq_lock; |
271 | struct vmw_fence_manager *fman; | ||
272 | uint32_t irq_mask; | ||
250 | 273 | ||
251 | /* | 274 | /* |
252 | * Device state | 275 | * Device state |
@@ -285,6 +308,26 @@ struct vmw_private { | |||
285 | 308 | ||
286 | struct mutex release_mutex; | 309 | struct mutex release_mutex; |
287 | uint32_t num_3d_resources; | 310 | uint32_t num_3d_resources; |
311 | |||
312 | /* | ||
313 | * Query processing. These members | ||
314 | * are protected by the cmdbuf mutex. | ||
315 | */ | ||
316 | |||
317 | struct ttm_buffer_object *dummy_query_bo; | ||
318 | struct ttm_buffer_object *pinned_bo; | ||
319 | uint32_t query_cid; | ||
320 | bool dummy_query_bo_pinned; | ||
321 | |||
322 | /* | ||
323 | * Surface swapping. The "surface_lru" list is protected by the | ||
324 | * resource lock in order to be able to destroy a surface and take | ||
325 | * it off the lru atomically. "used_memory_size" is currently | ||
326 | * protected by the cmdbuf mutex for simplicity. | ||
327 | */ | ||
328 | |||
329 | struct list_head surface_lru; | ||
330 | uint32_t used_memory_size; | ||
288 | }; | 331 | }; |
289 | 332 | ||
290 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 333 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -319,8 +362,8 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, | |||
319 | return val; | 362 | return val; |
320 | } | 363 | } |
321 | 364 | ||
322 | int vmw_3d_resource_inc(struct vmw_private *dev_priv); | 365 | int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga); |
323 | void vmw_3d_resource_dec(struct vmw_private *dev_priv); | 366 | void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga); |
324 | 367 | ||
325 | /** | 368 | /** |
326 | * GMR utilities - vmwgfx_gmr.c | 369 | * GMR utilities - vmwgfx_gmr.c |
@@ -345,7 +388,8 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
345 | struct drm_file *file_priv); | 388 | struct drm_file *file_priv); |
346 | extern int vmw_context_check(struct vmw_private *dev_priv, | 389 | extern int vmw_context_check(struct vmw_private *dev_priv, |
347 | struct ttm_object_file *tfile, | 390 | struct ttm_object_file *tfile, |
348 | int id); | 391 | int id, |
392 | struct vmw_resource **p_res); | ||
349 | extern void vmw_surface_res_free(struct vmw_resource *res); | 393 | extern void vmw_surface_res_free(struct vmw_resource *res); |
350 | extern int vmw_surface_init(struct vmw_private *dev_priv, | 394 | extern int vmw_surface_init(struct vmw_private *dev_priv, |
351 | struct vmw_surface *srf, | 395 | struct vmw_surface *srf, |
@@ -363,6 +407,8 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
363 | extern int vmw_surface_check(struct vmw_private *dev_priv, | 407 | extern int vmw_surface_check(struct vmw_private *dev_priv, |
364 | struct ttm_object_file *tfile, | 408 | struct ttm_object_file *tfile, |
365 | uint32_t handle, int *id); | 409 | uint32_t handle, int *id); |
410 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
411 | struct vmw_surface *srf); | ||
366 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); | 412 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
367 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | 413 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
368 | struct vmw_dma_buffer *vmw_bo, | 414 | struct vmw_dma_buffer *vmw_bo, |
@@ -378,10 +424,6 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | |||
378 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 424 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
379 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 425 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
380 | uint32_t id, struct vmw_dma_buffer **out); | 426 | uint32_t id, struct vmw_dma_buffer **out); |
381 | extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
382 | struct vmw_dma_buffer *bo); | ||
383 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | ||
384 | struct vmw_dma_buffer *bo); | ||
385 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 427 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
386 | struct drm_file *file_priv); | 428 | struct drm_file *file_priv); |
387 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 429 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
@@ -390,7 +432,30 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, | |||
390 | struct ttm_object_file *tfile, | 432 | struct ttm_object_file *tfile, |
391 | uint32_t *inout_id, | 433 | uint32_t *inout_id, |
392 | struct vmw_resource **out); | 434 | struct vmw_resource **out); |
435 | extern void vmw_resource_unreserve(struct list_head *list); | ||
393 | 436 | ||
437 | /** | ||
438 | * DMA buffer helper routines - vmwgfx_dmabuf.c | ||
439 | */ | ||
440 | extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv, | ||
441 | struct vmw_dma_buffer *bo, | ||
442 | struct ttm_placement *placement, | ||
443 | bool interruptible); | ||
444 | extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, | ||
445 | struct vmw_dma_buffer *buf, | ||
446 | bool pin, bool interruptible); | ||
447 | extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, | ||
448 | struct vmw_dma_buffer *buf, | ||
449 | bool pin, bool interruptible); | ||
450 | extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
451 | struct vmw_dma_buffer *bo, | ||
452 | bool pin, bool interruptible); | ||
453 | extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv, | ||
454 | struct vmw_dma_buffer *bo, | ||
455 | bool interruptible); | ||
456 | extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, | ||
457 | SVGAGuestPtr *ptr); | ||
458 | extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin); | ||
394 | 459 | ||
395 | /** | 460 | /** |
396 | * Misc Ioctl functionality - vmwgfx_ioctl.c | 461 | * Misc Ioctl functionality - vmwgfx_ioctl.c |
@@ -398,8 +463,16 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, | |||
398 | 463 | ||
399 | extern int vmw_getparam_ioctl(struct drm_device *dev, void *data, | 464 | extern int vmw_getparam_ioctl(struct drm_device *dev, void *data, |
400 | struct drm_file *file_priv); | 465 | struct drm_file *file_priv); |
401 | extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data, | 466 | extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, |
402 | struct drm_file *file_priv); | 467 | struct drm_file *file_priv); |
468 | extern int vmw_present_ioctl(struct drm_device *dev, void *data, | ||
469 | struct drm_file *file_priv); | ||
470 | extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | ||
471 | struct drm_file *file_priv); | ||
472 | extern unsigned int vmw_fops_poll(struct file *filp, | ||
473 | struct poll_table_struct *wait); | ||
474 | extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer, | ||
475 | size_t count, loff_t *offset); | ||
403 | 476 | ||
404 | /** | 477 | /** |
405 | * Fifo utilities - vmwgfx_fifo.c | 478 | * Fifo utilities - vmwgfx_fifo.c |
@@ -412,11 +485,12 @@ extern void vmw_fifo_release(struct vmw_private *dev_priv, | |||
412 | extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); | 485 | extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); |
413 | extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); | 486 | extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); |
414 | extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | 487 | extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, |
415 | uint32_t *sequence); | 488 | uint32_t *seqno); |
416 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); | 489 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); |
417 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); | ||
418 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); | 490 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); |
419 | extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); | 491 | extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); |
492 | extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||
493 | uint32_t cid); | ||
420 | 494 | ||
421 | /** | 495 | /** |
422 | * TTM glue - vmwgfx_ttm_glue.c | 496 | * TTM glue - vmwgfx_ttm_glue.c |
@@ -434,7 +508,10 @@ extern struct ttm_placement vmw_vram_placement; | |||
434 | extern struct ttm_placement vmw_vram_ne_placement; | 508 | extern struct ttm_placement vmw_vram_ne_placement; |
435 | extern struct ttm_placement vmw_vram_sys_placement; | 509 | extern struct ttm_placement vmw_vram_sys_placement; |
436 | extern struct ttm_placement vmw_vram_gmr_placement; | 510 | extern struct ttm_placement vmw_vram_gmr_placement; |
511 | extern struct ttm_placement vmw_vram_gmr_ne_placement; | ||
437 | extern struct ttm_placement vmw_sys_placement; | 512 | extern struct ttm_placement vmw_sys_placement; |
513 | extern struct ttm_placement vmw_evictable_placement; | ||
514 | extern struct ttm_placement vmw_srf_placement; | ||
438 | extern struct ttm_bo_driver vmw_bo_driver; | 515 | extern struct ttm_bo_driver vmw_bo_driver; |
439 | extern int vmw_dma_quiescent(struct drm_device *dev); | 516 | extern int vmw_dma_quiescent(struct drm_device *dev); |
440 | 517 | ||
@@ -444,45 +521,70 @@ extern int vmw_dma_quiescent(struct drm_device *dev); | |||
444 | 521 | ||
445 | extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 522 | extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, |
446 | struct drm_file *file_priv); | 523 | struct drm_file *file_priv); |
524 | extern int vmw_execbuf_process(struct drm_file *file_priv, | ||
525 | struct vmw_private *dev_priv, | ||
526 | void __user *user_commands, | ||
527 | void *kernel_commands, | ||
528 | uint32_t command_size, | ||
529 | uint64_t throttle_us, | ||
530 | struct drm_vmw_fence_rep __user | ||
531 | *user_fence_rep); | ||
532 | |||
533 | extern void | ||
534 | vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | ||
535 | bool only_on_cid_match, uint32_t cid); | ||
536 | |||
537 | extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, | ||
538 | struct vmw_private *dev_priv, | ||
539 | struct vmw_fence_obj **p_fence, | ||
540 | uint32_t *p_handle); | ||
541 | extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | ||
542 | struct vmw_fpriv *vmw_fp, | ||
543 | int ret, | ||
544 | struct drm_vmw_fence_rep __user | ||
545 | *user_fence_rep, | ||
546 | struct vmw_fence_obj *fence, | ||
547 | uint32_t fence_handle); | ||
447 | 548 | ||
448 | /** | 549 | /** |
449 | * IRQs and waiting - vmwgfx_irq.c | 550 | * IRQs and waiting - vmwgfx_irq.c |
450 | */ | 551 | */ |
451 | 552 | ||
452 | extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); | 553 | extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); |
453 | extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy, | 554 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, |
454 | uint32_t sequence, bool interruptible, | 555 | uint32_t seqno, bool interruptible, |
455 | unsigned long timeout); | 556 | unsigned long timeout); |
456 | extern void vmw_irq_preinstall(struct drm_device *dev); | 557 | extern void vmw_irq_preinstall(struct drm_device *dev); |
457 | extern int vmw_irq_postinstall(struct drm_device *dev); | 558 | extern int vmw_irq_postinstall(struct drm_device *dev); |
458 | extern void vmw_irq_uninstall(struct drm_device *dev); | 559 | extern void vmw_irq_uninstall(struct drm_device *dev); |
459 | extern bool vmw_fence_signaled(struct vmw_private *dev_priv, | 560 | extern bool vmw_seqno_passed(struct vmw_private *dev_priv, |
460 | uint32_t sequence); | 561 | uint32_t seqno); |
461 | extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data, | ||
462 | struct drm_file *file_priv); | ||
463 | extern int vmw_fallback_wait(struct vmw_private *dev_priv, | 562 | extern int vmw_fallback_wait(struct vmw_private *dev_priv, |
464 | bool lazy, | 563 | bool lazy, |
465 | bool fifo_idle, | 564 | bool fifo_idle, |
466 | uint32_t sequence, | 565 | uint32_t seqno, |
467 | bool interruptible, | 566 | bool interruptible, |
468 | unsigned long timeout); | 567 | unsigned long timeout); |
469 | extern void vmw_update_sequence(struct vmw_private *dev_priv, | 568 | extern void vmw_update_seqno(struct vmw_private *dev_priv, |
470 | struct vmw_fifo_state *fifo_state); | 569 | struct vmw_fifo_state *fifo_state); |
471 | 570 | extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); | |
571 | extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); | ||
572 | extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); | ||
573 | extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv); | ||
472 | 574 | ||
473 | /** | 575 | /** |
474 | * Rudimentary fence objects currently used only for throttling - | 576 | * Rudimentary fence-like objects currently used only for throttling - |
475 | * vmwgfx_fence.c | 577 | * vmwgfx_marker.c |
476 | */ | 578 | */ |
477 | 579 | ||
478 | extern void vmw_fence_queue_init(struct vmw_fence_queue *queue); | 580 | extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); |
479 | extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue); | 581 | extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); |
480 | extern int vmw_fence_push(struct vmw_fence_queue *queue, | 582 | extern int vmw_marker_push(struct vmw_marker_queue *queue, |
481 | uint32_t sequence); | 583 | uint32_t seqno); |
482 | extern int vmw_fence_pull(struct vmw_fence_queue *queue, | 584 | extern int vmw_marker_pull(struct vmw_marker_queue *queue, |
483 | uint32_t signaled_sequence); | 585 | uint32_t signaled_seqno); |
484 | extern int vmw_wait_lag(struct vmw_private *dev_priv, | 586 | extern int vmw_wait_lag(struct vmw_private *dev_priv, |
485 | struct vmw_fence_queue *queue, uint32_t us); | 587 | struct vmw_marker_queue *queue, uint32_t us); |
486 | 588 | ||
487 | /** | 589 | /** |
488 | * Kernel framebuffer - vmwgfx_fb.c | 590 | * Kernel framebuffer - vmwgfx_fb.c |
@@ -508,16 +610,29 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
508 | struct ttm_object_file *tfile, | 610 | struct ttm_object_file *tfile, |
509 | struct ttm_buffer_object *bo, | 611 | struct ttm_buffer_object *bo, |
510 | SVGA3dCmdHeader *header); | 612 | SVGA3dCmdHeader *header); |
511 | void vmw_kms_write_svga(struct vmw_private *vmw_priv, | 613 | int vmw_kms_write_svga(struct vmw_private *vmw_priv, |
512 | unsigned width, unsigned height, unsigned pitch, | 614 | unsigned width, unsigned height, unsigned pitch, |
513 | unsigned bbp, unsigned depth); | 615 | unsigned bpp, unsigned depth); |
514 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
515 | struct drm_file *file_priv); | ||
516 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster); | 616 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster); |
517 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | 617 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, |
518 | uint32_t pitch, | 618 | uint32_t pitch, |
519 | uint32_t height); | 619 | uint32_t height); |
520 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | 620 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); |
621 | int vmw_enable_vblank(struct drm_device *dev, int crtc); | ||
622 | void vmw_disable_vblank(struct drm_device *dev, int crtc); | ||
623 | int vmw_kms_present(struct vmw_private *dev_priv, | ||
624 | struct drm_file *file_priv, | ||
625 | struct vmw_framebuffer *vfb, | ||
626 | struct vmw_surface *surface, | ||
627 | uint32_t sid, int32_t destX, int32_t destY, | ||
628 | struct drm_vmw_rect *clips, | ||
629 | uint32_t num_clips); | ||
630 | int vmw_kms_readback(struct vmw_private *dev_priv, | ||
631 | struct drm_file *file_priv, | ||
632 | struct vmw_framebuffer *vfb, | ||
633 | struct drm_vmw_fence_rep __user *user_fence_rep, | ||
634 | struct drm_vmw_rect *clips, | ||
635 | uint32_t num_clips); | ||
521 | 636 | ||
522 | /** | 637 | /** |
523 | * Overlay control - vmwgfx_overlay.c | 638 | * Overlay control - vmwgfx_overlay.c |
@@ -576,4 +691,8 @@ static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer | |||
576 | return NULL; | 691 | return NULL; |
577 | } | 692 | } |
578 | 693 | ||
694 | static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) | ||
695 | { | ||
696 | return (struct ttm_mem_global *) dev_priv->mem_global_ref.object; | ||
697 | } | ||
579 | #endif | 698 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 41b95ed6dbcd..40932fbdac0f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -44,10 +44,71 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
44 | return 0; | 44 | return 0; |
45 | } | 45 | } |
46 | 46 | ||
47 | static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, | ||
48 | struct vmw_resource **p_res) | ||
49 | { | ||
50 | struct vmw_resource *res = *p_res; | ||
51 | |||
52 | if (list_empty(&res->validate_head)) { | ||
53 | list_add_tail(&res->validate_head, &sw_context->resource_list); | ||
54 | *p_res = NULL; | ||
55 | } else | ||
56 | vmw_resource_unreference(p_res); | ||
57 | } | ||
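The reference-transfer convention is worth a sketch, written under the assumption that vmw_resource_unreference() clears its argument: the caller's reference is consumed either way, so *p_res is NULL after the call.

struct vmw_resource *res = vmw_resource_reference(&srf->res);

vmw_resource_to_validate_list(sw_context, &res);
/* res is NULL here; the list entry (new or pre-existing) owns the ref. */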
58 | |||
59 | /** | ||
60 | * vmw_bo_to_validate_list - add a bo to a validate list | ||
61 | * | ||
62 | * @sw_context: The software context used for this command submission batch. | ||
63 | * @bo: The buffer object to add. | ||
64 | * @fence_flags: Fence flags to be or'ed with any other fence flags for | ||
65 | * this buffer on this submission batch. | ||
66 | * @p_val_node: If non-NULL, will be updated with the validate node number | ||
67 | * on return. | ||
68 | * | ||
69 | * Returns -EINVAL if the limit of number of buffer objects per command | ||
70 | * submission is reached. | ||
71 | */ | ||
72 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | ||
73 | struct ttm_buffer_object *bo, | ||
74 | uint32_t fence_flags, | ||
75 | uint32_t *p_val_node) | ||
76 | { | ||
77 | uint32_t val_node; | ||
78 | struct ttm_validate_buffer *val_buf; | ||
79 | |||
80 | val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | ||
81 | |||
82 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { | ||
83 | DRM_ERROR("Max number of DMA buffers per submission" | ||
84 | " exceeded.\n"); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | val_buf = &sw_context->val_bufs[val_node]; | ||
89 | if (unlikely(val_node == sw_context->cur_val_buf)) { | ||
90 | val_buf->new_sync_obj_arg = NULL; | ||
91 | val_buf->bo = ttm_bo_reference(bo); | ||
92 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | ||
93 | ++sw_context->cur_val_buf; | ||
94 | } | ||
95 | |||
96 | val_buf->new_sync_obj_arg = (void *) | ||
97 | ((unsigned long) val_buf->new_sync_obj_arg | fence_flags); | ||
98 | sw_context->fence_flags |= fence_flags; | ||
99 | |||
100 | if (p_val_node) | ||
101 | *p_val_node = val_node; | ||
102 | |||
103 | return 0; | ||
104 | } | ||
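A hedged caller fragment (DRM_VMW_FENCE_FLAG_EXEC taken from vmwgfx_drm.h; surrounding locking and context elided): register a buffer and record that executed commands must fence against it.

uint32_t val_node;
int ret;

ret = vmw_bo_to_validate_list(sw_context, bo,
			      DRM_VMW_FENCE_FLAG_EXEC, &val_node);
if (unlikely(ret != 0))
	return ret;	/* more than VMWGFX_MAX_VALIDATIONS distinct bos */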
105 | |||
47 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | 106 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, |
48 | struct vmw_sw_context *sw_context, | 107 | struct vmw_sw_context *sw_context, |
49 | SVGA3dCmdHeader *header) | 108 | SVGA3dCmdHeader *header) |
50 | { | 109 | { |
110 | struct vmw_resource *ctx; | ||
111 | |||
51 | struct vmw_cid_cmd { | 112 | struct vmw_cid_cmd { |
52 | SVGA3dCmdHeader header; | 113 | SVGA3dCmdHeader header; |
53 | __le32 cid; | 114 | __le32 cid; |
@@ -58,7 +119,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
58 | if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) | 119 | if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) |
59 | return 0; | 120 | return 0; |
60 | 121 | ||
61 | ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid); | 122 | ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, |
123 | &ctx); | ||
62 | if (unlikely(ret != 0)) { | 124 | if (unlikely(ret != 0)) { |
63 | DRM_ERROR("Could not find or use context %u\n", | 125 | DRM_ERROR("Could not find or use context %u\n", |
64 | (unsigned) cmd->cid); | 126 | (unsigned) cmd->cid); |
@@ -67,6 +129,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
67 | 129 | ||
68 | sw_context->last_cid = cmd->cid; | 130 | sw_context->last_cid = cmd->cid; |
69 | sw_context->cid_valid = true; | 131 | sw_context->cid_valid = true; |
132 | sw_context->cur_ctx = ctx; | ||
133 | vmw_resource_to_validate_list(sw_context, &ctx); | ||
70 | 134 | ||
71 | return 0; | 135 | return 0; |
72 | } | 136 | } |
@@ -75,29 +139,45 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | |||
75 | struct vmw_sw_context *sw_context, | 139 | struct vmw_sw_context *sw_context, |
76 | uint32_t *sid) | 140 | uint32_t *sid) |
77 | { | 141 | { |
142 | struct vmw_surface *srf; | ||
143 | int ret; | ||
144 | struct vmw_resource *res; | ||
145 | |||
78 | if (*sid == SVGA3D_INVALID_ID) | 146 | if (*sid == SVGA3D_INVALID_ID) |
79 | return 0; | 147 | return 0; |
80 | 148 | ||
81 | if (unlikely((!sw_context->sid_valid || | 149 | if (likely((sw_context->sid_valid && |
82 | *sid != sw_context->last_sid))) { | 150 | *sid == sw_context->last_sid))) { |
83 | int real_id; | 151 | *sid = sw_context->sid_translation; |
84 | int ret = vmw_surface_check(dev_priv, sw_context->tfile, | 152 | return 0; |
85 | *sid, &real_id); | 153 | } |
86 | 154 | ||
87 | if (unlikely(ret != 0)) { | 155 | ret = vmw_user_surface_lookup_handle(dev_priv, |
88 | DRM_ERROR("Could not find or use surface 0x%08x " | 156 | sw_context->tfile,
89 | "address 0x%08lx\n", | 157 | *sid, &srf); |
90 | (unsigned int) *sid, | 158 | if (unlikely(ret != 0)) { |
91 | (unsigned long) sid); | 159 | DRM_ERROR("Could not find or use surface 0x%08x "
92 | return ret; | 160 | "address 0x%08lx\n", |
93 | } | 161 | (unsigned int) *sid, |
162 | (unsigned long) sid); | ||
163 | return ret; | ||
164 | } | ||
94 | 165 | ||
95 | sw_context->last_sid = *sid; | 166 | ret = vmw_surface_validate(dev_priv, srf); |
96 | sw_context->sid_valid = true; | 167 | if (unlikely(ret != 0)) { |
97 | *sid = real_id; | 168 | if (ret != -ERESTARTSYS) |
98 | sw_context->sid_translation = real_id; | 169 | DRM_ERROR("Could not validate surface.\n"); |
99 | } else | 170 | vmw_surface_unreference(&srf); |
100 | *sid = sw_context->sid_translation; | 171 | return ret; |
172 | } | ||
173 | |||
174 | sw_context->last_sid = *sid; | ||
175 | sw_context->sid_valid = true; | ||
176 | sw_context->sid_translation = srf->res.id; | ||
177 | *sid = sw_context->sid_translation; | ||
178 | |||
179 | res = &srf->res; | ||
180 | vmw_resource_to_validate_list(sw_context, &res); | ||
101 | 181 | ||
102 | return 0; | 182 | return 0; |
103 | } | 183 | } |
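The rewritten sid check keeps the one-entry cache from the old code: as long as consecutive commands reference the same surface handle, the handle lookup and validation are skipped and only the cached device id is patched in. The caching shape in isolation, as a hedged sketch where lookup() stands in for the real resolve-and-validate step:

#include <stdbool.h>
#include <stdint.h>

struct sid_cache {
	bool valid;
	uint32_t last;		/* last user-space sid seen */
	uint32_t translation;	/* its device-side resource id */
};

/* Hypothetical stand-in for the expensive lookup + validate step. */
static uint32_t lookup(uint32_t user_sid)
{
	return user_sid + 1000;
}

static uint32_t translate_sid(struct sid_cache *c, uint32_t sid)
{
	if (c->valid && sid == c->last)
		return c->translation;	/* fast path: repeat reference */

	c->last = sid;			/* slow path: resolve and cache */
	c->translation = lookup(sid);
	c->valid = true;
	return c->translation;
}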
@@ -166,6 +246,12 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
166 | } *cmd; | 246 | } *cmd; |
167 | 247 | ||
168 | cmd = container_of(header, struct vmw_sid_cmd, header); | 248 | cmd = container_of(header, struct vmw_sid_cmd, header); |
249 | |||
250 | if (unlikely(!sw_context->kernel)) { | ||
251 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
252 | return -EPERM; | ||
253 | } | ||
254 | |||
169 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); | 255 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); |
170 | } | 256 | } |
171 | 257 | ||
@@ -178,10 +264,179 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
178 | SVGA3dCmdPresent body; | 264 | SVGA3dCmdPresent body; |
179 | } *cmd; | 265 | } *cmd; |
180 | 266 | ||
267 | |||
181 | cmd = container_of(header, struct vmw_sid_cmd, header); | 268 | cmd = container_of(header, struct vmw_sid_cmd, header); |
269 | |||
270 | if (unlikely(!sw_context->kernel)) { | ||
271 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
272 | return -EPERM; | ||
273 | } | ||
274 | |||
182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 275 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
183 | } | 276 | } |
184 | 277 | ||
278 | /** | ||
279 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. | ||
280 | * | ||
281 | * @dev_priv: The device private structure. | ||
282 | * @cid: The hardware context for the next query. | ||
283 | * @new_query_bo: The new buffer holding query results. | ||
284 | * @sw_context: The software context used for this command submission. | ||
285 | * | ||
286 | * This function checks whether @new_query_bo is suitable for holding | ||
287 | * query results, and if another buffer currently is pinned for query | ||
288 | * results. If so, the function prepares the state of @sw_context for | ||
289 | * switching pinned buffers after successful submission of the current | ||
290 | * command batch. It also checks whether we're using a new query context. | ||
291 | * In that case, it makes sure we emit a query barrier for the old | ||
292 | * context before the current query buffer is fenced. | ||
293 | */ | ||
294 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | ||
295 | uint32_t cid, | ||
296 | struct ttm_buffer_object *new_query_bo, | ||
297 | struct vmw_sw_context *sw_context) | ||
298 | { | ||
299 | int ret; | ||
300 | bool add_cid = false; | ||
301 | uint32_t cid_to_add; | ||
302 | |||
303 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { | ||
304 | |||
305 | if (unlikely(new_query_bo->num_pages > 4)) { | ||
306 | DRM_ERROR("Query buffer too large.\n"); | ||
307 | return -EINVAL; | ||
308 | } | ||
309 | |||
310 | if (unlikely(sw_context->cur_query_bo != NULL)) { | ||
311 | BUG_ON(!sw_context->query_cid_valid); | ||
312 | add_cid = true; | ||
313 | cid_to_add = sw_context->cur_query_cid; | ||
314 | ret = vmw_bo_to_validate_list(sw_context, | ||
315 | sw_context->cur_query_bo, | ||
316 | DRM_VMW_FENCE_FLAG_EXEC, | ||
317 | NULL); | ||
318 | if (unlikely(ret != 0)) | ||
319 | return ret; | ||
320 | } | ||
321 | sw_context->cur_query_bo = new_query_bo; | ||
322 | |||
323 | ret = vmw_bo_to_validate_list(sw_context, | ||
324 | dev_priv->dummy_query_bo, | ||
325 | DRM_VMW_FENCE_FLAG_EXEC, | ||
326 | NULL); | ||
327 | if (unlikely(ret != 0)) | ||
328 | return ret; | ||
329 | |||
330 | } | ||
331 | |||
332 | if (unlikely(cid != sw_context->cur_query_cid && | ||
333 | sw_context->query_cid_valid)) { | ||
334 | add_cid = true; | ||
335 | cid_to_add = sw_context->cur_query_cid; | ||
336 | } | ||
337 | |||
338 | sw_context->cur_query_cid = cid; | ||
339 | sw_context->query_cid_valid = true; | ||
340 | |||
341 | if (add_cid) { | ||
342 | struct vmw_resource *ctx = sw_context->cur_ctx; | ||
343 | |||
344 | if (list_empty(&ctx->query_head)) | ||
345 | list_add_tail(&ctx->query_head, | ||
346 | &sw_context->query_list); | ||
347 | ret = vmw_bo_to_validate_list(sw_context, | ||
348 | dev_priv->dummy_query_bo, | ||
349 | DRM_VMW_FENCE_FLAG_EXEC, | ||
350 | NULL); | ||
351 | if (unlikely(ret != 0)) | ||
352 | return ret; | ||
353 | } | ||
354 | return 0; | ||
355 | } | ||
356 | |||
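Stripped of the validate-list bookkeeping, the prepare step makes one decision: queue a query barrier for the old context whenever either the pinned query buffer or the query context is about to change. A hedged sketch of just that logic, with queue_barrier() and struct query_state as hypothetical stand-ins for the sw_context state:

#include <stdbool.h>
#include <stdint.h>

struct query_state {
	const void *cur_bo;	/* currently pinned query buffer */
	uint32_t cur_cid;	/* context of the last query */
	bool cid_valid;
};

static void queue_barrier(uint32_t cid) { (void)cid; /* emitted later */ }

static void switch_prepare(struct query_state *s, uint32_t cid,
			   const void *new_bo)
{
	uint32_t old_cid = s->cur_cid;
	bool barrier = false;

	if (new_bo != s->cur_bo) {
		if (s->cur_bo)
			barrier = true;	/* query buffer is switching */
		s->cur_bo = new_bo;
	}
	if (s->cid_valid && cid != old_cid)
		barrier = true;		/* query context is switching */

	s->cur_cid = cid;
	s->cid_valid = true;

	if (barrier)
		queue_barrier(old_cid);	/* fence old context's queries */
}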
357 | |||
358 | /** | ||
359 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer | ||
360 | * | ||
361 | * @dev_priv: The device private structure. | ||
362 | * @sw_context: The software context used for this command submission batch. | ||
363 | * | ||
364 | * This function will check if we're switching query buffers, and will then, | ||
365 | * if no other query waits are issued in this command submission batch, | ||
366 | * issue a dummy occlusion query wait used as a query barrier. When the fence | ||
367 | * object following that query wait has signaled, we are sure that all | ||
368 | * preceding queries have finished, and the old query buffer can be unpinned. | ||
369 | * However, since both the new query buffer and the old one are fenced with | ||
370 | * that fence, we can do an asynchronous unpin now, and be sure that the | ||
371 | * old query buffer won't be moved until the fence has signaled. | ||
372 | * | ||
373 | * As mentioned above, both the new and the old query buffers need to be fenced | ||
374 | * using a sequence emitted *after* calling this function. | ||
375 | */ | ||
376 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | ||
377 | struct vmw_sw_context *sw_context) | ||
378 | { | ||
379 | |||
380 | struct vmw_resource *ctx, *next_ctx; | ||
381 | int ret; | ||
382 | |||
383 | /* | ||
384 | * The validate list should still hold references to all | ||
385 | * contexts here. | ||
386 | */ | ||
387 | |||
388 | list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, | ||
389 | query_head) { | ||
390 | list_del_init(&ctx->query_head); | ||
391 | |||
392 | BUG_ON(list_empty(&ctx->validate_head)); | ||
393 | |||
394 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); | ||
395 | |||
396 | if (unlikely(ret != 0)) | ||
397 | DRM_ERROR("Out of fifo space for dummy query.\n"); | ||
398 | } | ||
399 | |||
400 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { | ||
401 | if (dev_priv->pinned_bo) { | ||
402 | vmw_bo_pin(dev_priv->pinned_bo, false); | ||
403 | ttm_bo_unref(&dev_priv->pinned_bo); | ||
404 | } | ||
405 | |||
406 | vmw_bo_pin(sw_context->cur_query_bo, true); | ||
407 | |||
408 | /* | ||
409 | * We pin also the dummy_query_bo buffer so that we | ||
410 | * don't need to validate it when emitting | ||
411 | * dummy queries in context destroy paths. | ||
412 | */ | ||
413 | |||
414 | vmw_bo_pin(dev_priv->dummy_query_bo, true); | ||
415 | dev_priv->dummy_query_bo_pinned = true; | ||
416 | |||
417 | dev_priv->query_cid = sw_context->cur_query_cid; | ||
418 | dev_priv->pinned_bo = | ||
419 | ttm_bo_reference(sw_context->cur_query_bo); | ||
420 | } | ||
421 | } | ||
422 | |||
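The commit step then performs the actual swap as a reference-counted pin exchange: the old buffer is unpinned and dropped (it can only move once the fence both buffers are fenced with has signaled, which is what makes the asynchronous unpin safe), and the new one is pinned and referenced. Roughly, with pin()/ref()/unref() as hypothetical stand-ins for vmw_bo_pin() and ttm_bo_reference()/ttm_bo_unref():

/* Trivial stand-in helpers so the sketch compiles on its own. */
static void pin(void *bo, int on) { (void)bo; (void)on; }
static void *ref(void *bo) { return bo; }
static void unref(void **bo) { *bo = (void *)0; }

static void *pinned;			/* mirrors dev_priv->pinned_bo */

static void switch_commit(void *new_bo)
{
	if (pinned == new_bo)
		return;			/* nothing changed this batch */

	if (pinned) {
		pin(pinned, 0);		/* unpin; eviction waits on fence */
		unref(&pinned);
	}
	pin(new_bo, 1);			/* keep new query results resident */
	pinned = ref(new_bo);
}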
423 | /** | ||
424 | * vmw_query_switch_backoff - clear query barrier list | ||
425 | * @sw_context: The sw context used for this submission batch. | ||
426 | * | ||
427 | * This function is used as part of an error path, where a previously | ||
428 | * set up list of query barriers needs to be cleared. | ||
429 | * | ||
430 | */ | ||
431 | static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context) | ||
432 | { | ||
433 | struct list_head *list, *next; | ||
434 | |||
435 | list_for_each_safe(list, next, &sw_context->query_list) { | ||
436 | list_del_init(list); | ||
437 | } | ||
438 | } | ||
439 | |||
185 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | 440 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
186 | struct vmw_sw_context *sw_context, | 441 | struct vmw_sw_context *sw_context, |
187 | SVGAGuestPtr *ptr, | 442 | SVGAGuestPtr *ptr, |
@@ -191,8 +446,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
191 | struct ttm_buffer_object *bo; | 446 | struct ttm_buffer_object *bo; |
192 | uint32_t handle = ptr->gmrId; | 447 | uint32_t handle = ptr->gmrId; |
193 | struct vmw_relocation *reloc; | 448 | struct vmw_relocation *reloc; |
194 | uint32_t cur_validate_node; | ||
195 | struct ttm_validate_buffer *val_buf; | ||
196 | int ret; | 449 | int ret; |
197 | 450 | ||
198 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 451 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
@@ -212,22 +465,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
212 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 465 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
213 | reloc->location = ptr; | 466 | reloc->location = ptr; |
214 | 467 | ||
215 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 468 | ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC, |
216 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { | 469 | &reloc->index); |
217 | DRM_ERROR("Max number of DMA buffers per submission" | 470 | if (unlikely(ret != 0)) |
218 | " exceeded.\n"); | ||
219 | ret = -EINVAL; | ||
220 | goto out_no_reloc; | 471 | goto out_no_reloc; |
221 | } | ||
222 | 472 | ||
223 | reloc->index = cur_validate_node; | ||
224 | if (unlikely(cur_validate_node == sw_context->cur_val_buf)) { | ||
225 | val_buf = &sw_context->val_bufs[cur_validate_node]; | ||
226 | val_buf->bo = ttm_bo_reference(bo); | ||
227 | val_buf->new_sync_obj_arg = (void *) dev_priv; | ||
228 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | ||
229 | ++sw_context->cur_val_buf; | ||
230 | } | ||
231 | *vmw_bo_p = vmw_bo; | 473 | *vmw_bo_p = vmw_bo; |
232 | return 0; | 474 | return 0; |
233 | 475 | ||
@@ -259,8 +501,11 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
259 | if (unlikely(ret != 0)) | 501 | if (unlikely(ret != 0)) |
260 | return ret; | 502 | return ret; |
261 | 503 | ||
504 | ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, | ||
505 | &vmw_bo->base, sw_context); | ||
506 | |||
262 | vmw_dmabuf_unreference(&vmw_bo); | 507 | vmw_dmabuf_unreference(&vmw_bo); |
263 | return 0; | 508 | return ret; |
264 | } | 509 | } |
265 | 510 | ||
266 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | 511 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
@@ -273,6 +518,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
273 | SVGA3dCmdWaitForQuery q; | 518 | SVGA3dCmdWaitForQuery q; |
274 | } *cmd; | 519 | } *cmd; |
275 | int ret; | 520 | int ret; |
521 | struct vmw_resource *ctx; | ||
276 | 522 | ||
277 | cmd = container_of(header, struct vmw_query_cmd, header); | 523 | cmd = container_of(header, struct vmw_query_cmd, header); |
278 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 524 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
@@ -286,10 +532,19 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
286 | return ret; | 532 | return ret; |
287 | 533 | ||
288 | vmw_dmabuf_unreference(&vmw_bo); | 534 | vmw_dmabuf_unreference(&vmw_bo); |
535 | |||
536 | /* | ||
537 | * This wait will act as a barrier for previous waits for this | ||
538 | * context. | ||
539 | */ | ||
540 | |||
541 | ctx = sw_context->cur_ctx; | ||
542 | if (!list_empty(&ctx->query_head)) | ||
543 | list_del_init(&ctx->query_head); | ||
544 | |||
289 | return 0; | 545 | return 0; |
290 | } | 546 | } |
291 | 547 | ||
292 | |||
293 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 548 | static int vmw_cmd_dma(struct vmw_private *dev_priv, |
294 | struct vmw_sw_context *sw_context, | 549 | struct vmw_sw_context *sw_context, |
295 | SVGA3dCmdHeader *header) | 550 | SVGA3dCmdHeader *header) |
@@ -302,6 +557,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
302 | SVGA3dCmdSurfaceDMA dma; | 557 | SVGA3dCmdSurfaceDMA dma; |
303 | } *cmd; | 558 | } *cmd; |
304 | int ret; | 559 | int ret; |
560 | struct vmw_resource *res; | ||
305 | 561 | ||
306 | cmd = container_of(header, struct vmw_dma_cmd, header); | 562 | cmd = container_of(header, struct vmw_dma_cmd, header); |
307 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 563 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
@@ -318,18 +574,28 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
318 | goto out_no_reloc; | 574 | goto out_no_reloc; |
319 | } | 575 | } |
320 | 576 | ||
321 | /** | 577 | ret = vmw_surface_validate(dev_priv, srf); |
578 | if (unlikely(ret != 0)) { | ||
579 | if (ret != -ERESTARTSYS) | ||
580 | DRM_ERROR("Could not validate surface.\n"); | ||
581 | goto out_no_validate; | ||
582 | } | ||
583 | |||
584 | /* | ||
322 | * Patch command stream with device SID. | 585 | * Patch command stream with device SID. |
323 | */ | 586 | */ |
324 | |||
325 | cmd->dma.host.sid = srf->res.id; | 587 | cmd->dma.host.sid = srf->res.id; |
326 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); | 588 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); |
327 | /** | ||
328 | * FIXME: May deadlock here when called from the | ||
329 | * command parsing code. | ||
330 | */ | ||
331 | vmw_surface_unreference(&srf); | ||
332 | 589 | ||
590 | vmw_dmabuf_unreference(&vmw_bo); | ||
591 | |||
592 | res = &srf->res; | ||
593 | vmw_resource_to_validate_list(sw_context, &res); | ||
594 | |||
595 | return 0; | ||
596 | |||
597 | out_no_validate: | ||
598 | vmw_surface_unreference(&srf); | ||
333 | out_no_reloc: | 599 | out_no_reloc: |
334 | vmw_dmabuf_unreference(&vmw_bo); | 600 | vmw_dmabuf_unreference(&vmw_bo); |
335 | return ret; | 601 | return ret; |
@@ -419,6 +685,71 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
419 | return 0; | 685 | return 0; |
420 | } | 686 | } |
421 | 687 | ||
688 | static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | ||
689 | struct vmw_sw_context *sw_context, | ||
690 | void *buf) | ||
691 | { | ||
692 | struct vmw_dma_buffer *vmw_bo; | ||
693 | int ret; | ||
694 | |||
695 | struct { | ||
696 | uint32_t header; | ||
697 | SVGAFifoCmdDefineGMRFB body; | ||
698 | } *cmd = buf; | ||
699 | |||
700 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
701 | &cmd->body.ptr, | ||
702 | &vmw_bo); | ||
703 | if (unlikely(ret != 0)) | ||
704 | return ret; | ||
705 | |||
706 | vmw_dmabuf_unreference(&vmw_bo); | ||
707 | |||
708 | return ret; | ||
709 | } | ||
710 | |||
711 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | ||
712 | struct vmw_sw_context *sw_context, | ||
713 | void *buf, uint32_t *size) | ||
714 | { | ||
715 | uint32_t size_remaining = *size; | ||
716 | uint32_t cmd_id; | ||
717 | |||
718 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | ||
719 | switch (cmd_id) { | ||
720 | case SVGA_CMD_UPDATE: | ||
721 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); | ||
722 | break; | ||
723 | case SVGA_CMD_DEFINE_GMRFB: | ||
724 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); | ||
725 | break; | ||
726 | case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: | ||
727 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | ||
728 | break; | ||
729 | case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: | ||
730 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB); | ||
731 | break; | ||
732 | default: | ||
733 | DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id); | ||
734 | return -EINVAL; | ||
735 | } | ||
736 | |||
737 | if (*size > size_remaining) { | ||
738 | DRM_ERROR("Invalid SVGA command (size mismatch):" | ||
739 | " %u.\n", cmd_id); | ||
740 | return -EINVAL; | ||
741 | } | ||
742 | |||
743 | if (unlikely(!sw_context->kernel)) { | ||
744 | DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id); | ||
745 | return -EPERM; | ||
746 | } | ||
747 | |||
748 | if (cmd_id == SVGA_CMD_DEFINE_GMRFB) | ||
749 | return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); | ||
750 | |||
751 | return 0; | ||
752 | } | ||
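Unlike the 3D packets, these legacy commands carry no self-describing header, so the checker has to know each command's fixed size and prove it fits within the bytes actually remaining before the payload is touched. The same guard in isolation, as a runnable sketch with a hypothetical two-entry size table (it assumes at least the id word itself is present):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical fixed payload sizes indexed by command id; stand-ins
 * for the sizeof(SVGAFifoCmd...) values chosen by the switch above. */
static const uint32_t payload_size[] = { 8, 16 };

static int check_cmd(const uint8_t *buf, uint32_t remaining,
		     uint32_t *size)
{
	uint32_t id;

	memcpy(&id, buf, sizeof(id));
	if (id >= sizeof(payload_size) / sizeof(payload_size[0]))
		return -1;		/* unsupported command */

	*size = sizeof(uint32_t) + payload_size[id];
	if (*size > remaining)
		return -1;		/* would read past stream end */

	return 0;
}

int main(void)
{
	uint8_t stream[8] = { 0 };	/* id 0 needs 12 bytes, only 8 left */
	uint32_t size;

	printf("%d\n", check_cmd(stream, sizeof(stream), &size)); /* -1 */
	return 0;
}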
422 | 753 | ||
423 | typedef int (*vmw_cmd_func) (struct vmw_private *, | 754 | typedef int (*vmw_cmd_func) (struct vmw_private *, |
424 | struct vmw_sw_context *, | 755 | struct vmw_sw_context *, |
@@ -471,11 +802,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
471 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 802 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
472 | int ret; | 803 | int ret; |
473 | 804 | ||
474 | cmd_id = ((uint32_t *)buf)[0]; | 805 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
475 | if (cmd_id == SVGA_CMD_UPDATE) { | 806 | /* Handle any non-3D commands */
476 | *size = 5 << 2; | 807 | if (unlikely(cmd_id < SVGA_CMD_MAX)) |
477 | return 0; | 808 | return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); |
478 | } | 809 | |
479 | 810 | ||
480 | cmd_id = le32_to_cpu(header->id); | 811 | cmd_id = le32_to_cpu(header->id); |
481 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); | 812 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); |
@@ -500,7 +831,8 @@ out_err: | |||
500 | 831 | ||
501 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, | 832 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, |
502 | struct vmw_sw_context *sw_context, | 833 | struct vmw_sw_context *sw_context, |
503 | void *buf, uint32_t size) | 834 | void *buf, |
835 | uint32_t size) | ||
504 | { | 836 | { |
505 | int32_t cur_size = size; | 837 | int32_t cur_size = size; |
506 | int ret; | 838 | int ret; |
@@ -550,7 +882,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
550 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | 882 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
551 | { | 883 | { |
552 | struct ttm_validate_buffer *entry, *next; | 884 | struct ttm_validate_buffer *entry, *next; |
885 | struct vmw_resource *res, *res_next; | ||
553 | 886 | ||
887 | /* | ||
888 | * Drop references to DMA buffers held during command submission. | ||
889 | */ | ||
554 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, | 890 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, |
555 | head) { | 891 | head) { |
556 | list_del(&entry->head); | 892 | list_del(&entry->head); |
@@ -559,6 +895,16 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context) | |||
559 | sw_context->cur_val_buf--; | 895 | sw_context->cur_val_buf--; |
560 | } | 896 | } |
561 | BUG_ON(sw_context->cur_val_buf != 0); | 897 | BUG_ON(sw_context->cur_val_buf != 0); |
898 | |||
899 | /* | ||
900 | * Drop references to resources held during command submission. | ||
901 | */ | ||
902 | vmw_resource_unreserve(&sw_context->resource_list); | ||
903 | list_for_each_entry_safe(res, res_next, &sw_context->resource_list, | ||
904 | validate_head) { | ||
905 | list_del_init(&res->validate_head); | ||
906 | vmw_resource_unreference(&res); | ||
907 | } | ||
562 | } | 908 | } |
563 | 909 | ||
564 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 910 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
@@ -566,6 +912,16 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
566 | { | 912 | { |
567 | int ret; | 913 | int ret; |
568 | 914 | ||
915 | |||
916 | /* | ||
917 | * Don't validate pinned buffers. | ||
918 | */ | ||
919 | |||
920 | if (bo == dev_priv->pinned_bo || | ||
921 | (bo == dev_priv->dummy_query_bo && | ||
922 | dev_priv->dummy_query_bo_pinned)) | ||
923 | return 0; | ||
924 | |||
569 | /** | 925 | /** |
570 | * Put BO in VRAM if there is space, otherwise as a GMR. | 926 | * Put BO in VRAM if there is space, otherwise as a GMR. |
571 | * If there is no space in VRAM and GMR ids are all used up, | 927 | * If there is no space in VRAM and GMR ids are all used up, |
@@ -602,57 +958,208 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv, | |||
602 | return 0; | 958 | return 0; |
603 | } | 959 | } |
604 | 960 | ||
605 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 961 | static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, |
606 | struct drm_file *file_priv) | 962 | uint32_t size) |
963 | { | ||
964 | if (likely(sw_context->cmd_bounce_size >= size)) | ||
965 | return 0; | ||
966 | |||
967 | if (sw_context->cmd_bounce_size == 0) | ||
968 | sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE; | ||
969 | |||
970 | while (sw_context->cmd_bounce_size < size) { | ||
971 | sw_context->cmd_bounce_size = | ||
972 | PAGE_ALIGN(sw_context->cmd_bounce_size + | ||
973 | (sw_context->cmd_bounce_size >> 1)); | ||
974 | } | ||
975 | |||
976 | if (sw_context->cmd_bounce != NULL) | ||
977 | vfree(sw_context->cmd_bounce); | ||
978 | |||
979 | sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); | ||
980 | |||
981 | if (sw_context->cmd_bounce == NULL) { | ||
982 | DRM_ERROR("Failed to allocate command bounce buffer.\n"); | ||
983 | sw_context->cmd_bounce_size = 0; | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | |||
987 | return 0; | ||
988 | } | ||
989 | |||
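The bounce buffer grows geometrically, multiplying by roughly 1.5 and rounding up to a whole page each step, so a long-lived context quickly converges on a stable size instead of reallocating for every slightly larger submission. A runnable sketch of the growth schedule (the 4 KiB page and 32 KiB initial size are assumptions, not values read from the driver):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define BOUNCE_INIT	32768u	/* assumed VMWGFX_CMD_BOUNCE_INIT_SIZE */

int main(void)
{
	uint32_t want = 1000000;	/* bytes needed this submission */
	uint32_t size = BOUNCE_INIT;

	while (size < want) {
		size = PAGE_ALIGN(size + (size >> 1));	/* grow ~1.5x */
		printf("-> %u\n", size);
	}
	return 0;
}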
990 | /** | ||
991 | * vmw_execbuf_fence_commands - create and submit a command stream fence | ||
992 | * | ||
993 | * Creates a fence object and submits a command stream marker. | ||
994 | * If this fails for some reason, we sync the fifo and return NULL. | ||
995 | * It is then safe to fence buffers with a NULL pointer. | ||
996 | * | ||
997 | * If @p_handle is not NULL, @file_priv must also not be NULL. A | ||
998 | * user-space handle is created only if @p_handle is not NULL. | ||
999 | */ | ||
1000 | |||
1001 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, | ||
1002 | struct vmw_private *dev_priv, | ||
1003 | struct vmw_fence_obj **p_fence, | ||
1004 | uint32_t *p_handle) | ||
607 | { | 1005 | { |
608 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
609 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | ||
610 | struct drm_vmw_fence_rep fence_rep; | ||
611 | struct drm_vmw_fence_rep __user *user_fence_rep; | ||
612 | int ret; | ||
613 | void *user_cmd; | ||
614 | void *cmd; | ||
615 | uint32_t sequence; | 1006 | uint32_t sequence; |
616 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 1007 | int ret; |
617 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 1008 | bool synced = false; |
618 | 1009 | ||
619 | ret = ttm_read_lock(&vmaster->lock, true); | 1010 | /* p_handle implies file_priv. */ |
620 | if (unlikely(ret != 0)) | 1011 | BUG_ON(p_handle != NULL && file_priv == NULL); |
621 | return ret; | ||
622 | 1012 | ||
623 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | 1013 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
624 | if (unlikely(ret != 0)) { | 1014 | if (unlikely(ret != 0)) { |
625 | ret = -ERESTARTSYS; | 1015 | DRM_ERROR("Fence submission error. Syncing.\n"); |
626 | goto out_no_cmd_mutex; | 1016 | synced = true; |
627 | } | 1017 | } |
628 | 1018 | ||
629 | cmd = vmw_fifo_reserve(dev_priv, arg->command_size); | 1019 | if (p_handle != NULL) |
630 | if (unlikely(cmd == NULL)) { | 1020 | ret = vmw_user_fence_create(file_priv, dev_priv->fman, |
631 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 1021 | sequence, |
632 | ret = -ENOMEM; | 1022 | DRM_VMW_FENCE_FLAG_EXEC, |
633 | goto out_unlock; | 1023 | p_fence, p_handle); |
1024 | else | ||
1025 | ret = vmw_fence_create(dev_priv->fman, sequence, | ||
1026 | DRM_VMW_FENCE_FLAG_EXEC, | ||
1027 | p_fence); | ||
1028 | |||
1029 | if (unlikely(ret != 0 && !synced)) { | ||
1030 | (void) vmw_fallback_wait(dev_priv, false, false, | ||
1031 | sequence, false, | ||
1032 | VMW_FENCE_WAIT_TIMEOUT); | ||
1033 | *p_fence = NULL; | ||
634 | } | 1034 | } |
635 | 1035 | ||
636 | user_cmd = (void __user *)(unsigned long)arg->commands; | 1036 | return 0; |
637 | ret = copy_from_user(cmd, user_cmd, arg->command_size); | 1037 | } |
638 | 1038 | ||
639 | if (unlikely(ret != 0)) { | 1039 | /** |
640 | ret = -EFAULT; | 1040 | * vmw_execbuf_copy_fence_user - copy fence object information to |
641 | DRM_ERROR("Failed copying commands.\n"); | 1041 | * user-space. |
642 | goto out_commit; | 1042 | * |
1043 | * @dev_priv: Pointer to a vmw_private struct. | ||
1044 | * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. | ||
1045 | * @ret: Return value from fence object creation. | ||
1046 | * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to | ||
1047 | * which the information should be copied. | ||
1048 | * @fence: Pointer to the fence object. | ||
1049 | * @fence_handle: User-space fence handle. | ||
1050 | * | ||
1051 | * This function copies fence information to user-space. If copying fails, | ||
1052 | * the user-space struct drm_vmw_fence_rep::error member is hopefully | ||
1053 | * left untouched, and if it's preloaded with an -EFAULT by user-space, | ||
1054 | * the error will hopefully be detected. | ||
1055 | * Also if copying fails, user-space will be unable to signal the fence | ||
1056 | * object so we wait for it immediately, and then unreference the | ||
1057 | * user-space reference. | ||
1058 | */ | ||
1059 | void | ||
1060 | vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | ||
1061 | struct vmw_fpriv *vmw_fp, | ||
1062 | int ret, | ||
1063 | struct drm_vmw_fence_rep __user *user_fence_rep, | ||
1064 | struct vmw_fence_obj *fence, | ||
1065 | uint32_t fence_handle) | ||
1066 | { | ||
1067 | struct drm_vmw_fence_rep fence_rep; | ||
1068 | |||
1069 | if (user_fence_rep == NULL) | ||
1070 | return; | ||
1071 | |||
1072 | memset(&fence_rep, 0, sizeof(fence_rep)); | ||
1073 | |||
1074 | fence_rep.error = ret; | ||
1075 | if (ret == 0) { | ||
1076 | BUG_ON(fence == NULL); | ||
1077 | |||
1078 | fence_rep.handle = fence_handle; | ||
1079 | fence_rep.seqno = fence->seqno; | ||
1080 | vmw_update_seqno(dev_priv, &dev_priv->fifo); | ||
1081 | fence_rep.passed_seqno = dev_priv->last_read_seqno; | ||
643 | } | 1082 | } |
644 | 1083 | ||
1084 | /* | ||
1085 | * copy_to_user errors will be detected by user space not | ||
1086 | * seeing fence_rep::error filled in. Typically | ||
1087 | * user-space would have pre-set that member to -EFAULT. | ||
1088 | */ | ||
1089 | ret = copy_to_user(user_fence_rep, &fence_rep, | ||
1090 | sizeof(fence_rep)); | ||
1091 | |||
1092 | /* | ||
1093 | * User-space lost the fence object. We need to sync | ||
1094 | * and unreference the handle. | ||
1095 | */ | ||
1096 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { | ||
1097 | ttm_ref_object_base_unref(vmw_fp->tfile, | ||
1098 | fence_handle, TTM_REF_USAGE); | ||
1099 | DRM_ERROR("Fence copy error. Syncing.\n"); | ||
1100 | (void) vmw_fence_obj_wait(fence, fence->signal_mask, | ||
1101 | false, false, | ||
1102 | VMW_FENCE_WAIT_TIMEOUT); | ||
1103 | } | ||
1104 | } | ||
1105 | |||
1106 | int vmw_execbuf_process(struct drm_file *file_priv, | ||
1107 | struct vmw_private *dev_priv, | ||
1108 | void __user *user_commands, | ||
1109 | void *kernel_commands, | ||
1110 | uint32_t command_size, | ||
1111 | uint64_t throttle_us, | ||
1112 | struct drm_vmw_fence_rep __user *user_fence_rep) | ||
1113 | { | ||
1114 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | ||
1115 | struct vmw_fence_obj *fence; | ||
1116 | uint32_t handle; | ||
1117 | void *cmd; | ||
1118 | int ret; | ||
1119 | |||
1120 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | ||
1121 | if (unlikely(ret != 0)) | ||
1122 | return -ERESTARTSYS; | ||
1123 | |||
1124 | if (kernel_commands == NULL) { | ||
1125 | sw_context->kernel = false; | ||
1126 | |||
1127 | ret = vmw_resize_cmd_bounce(sw_context, command_size); | ||
1128 | if (unlikely(ret != 0)) | ||
1129 | goto out_unlock; | ||
1130 | |||
1131 | |||
1132 | ret = copy_from_user(sw_context->cmd_bounce, | ||
1133 | user_commands, command_size); | ||
1134 | |||
1135 | if (unlikely(ret != 0)) { | ||
1136 | ret = -EFAULT; | ||
1137 | DRM_ERROR("Failed copying commands.\n"); | ||
1138 | goto out_unlock; | ||
1139 | } | ||
1140 | kernel_commands = sw_context->cmd_bounce; | ||
1141 | } else | ||
1142 | sw_context->kernel = true; | ||
1143 | |||
645 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 1144 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; |
646 | sw_context->cid_valid = false; | 1145 | sw_context->cid_valid = false; |
647 | sw_context->sid_valid = false; | 1146 | sw_context->sid_valid = false; |
648 | sw_context->cur_reloc = 0; | 1147 | sw_context->cur_reloc = 0; |
649 | sw_context->cur_val_buf = 0; | 1148 | sw_context->cur_val_buf = 0; |
1149 | sw_context->fence_flags = 0; | ||
1150 | INIT_LIST_HEAD(&sw_context->query_list); | ||
1151 | INIT_LIST_HEAD(&sw_context->resource_list); | ||
1152 | sw_context->cur_query_bo = dev_priv->pinned_bo; | ||
1153 | sw_context->cur_query_cid = dev_priv->query_cid; | ||
1154 | sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); | ||
650 | 1155 | ||
651 | INIT_LIST_HEAD(&sw_context->validate_nodes); | 1156 | INIT_LIST_HEAD(&sw_context->validate_nodes); |
652 | 1157 | ||
653 | ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); | 1158 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
1159 | command_size); | ||
654 | if (unlikely(ret != 0)) | 1160 | if (unlikely(ret != 0)) |
655 | goto out_err; | 1161 | goto out_err; |
1162 | |||
656 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); | 1163 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); |
657 | if (unlikely(ret != 0)) | 1164 | if (unlikely(ret != 0)) |
658 | goto out_err; | 1165 | goto out_err; |
@@ -663,57 +1170,206 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
663 | 1170 | ||
664 | vmw_apply_relocations(sw_context); | 1171 | vmw_apply_relocations(sw_context); |
665 | 1172 | ||
666 | if (arg->throttle_us) { | 1173 | if (throttle_us) { |
667 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, | 1174 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
668 | arg->throttle_us); | 1175 | throttle_us); |
669 | 1176 | ||
670 | if (unlikely(ret != 0)) | 1177 | if (unlikely(ret != 0)) |
671 | goto out_err; | 1178 | goto out_throttle; |
672 | } | 1179 | } |
673 | 1180 | ||
674 | vmw_fifo_commit(dev_priv, arg->command_size); | 1181 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
675 | 1182 | if (unlikely(cmd == NULL)) { | |
676 | ret = vmw_fifo_send_fence(dev_priv, &sequence); | 1183 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
1184 | ret = -ENOMEM; | ||
1185 | goto out_throttle; | ||
1186 | } | ||
677 | 1187 | ||
678 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, | 1188 | memcpy(cmd, kernel_commands, command_size); |
679 | (void *)(unsigned long) sequence); | 1189 | vmw_fifo_commit(dev_priv, command_size); |
680 | vmw_clear_validations(sw_context); | ||
681 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
682 | 1190 | ||
1191 | vmw_query_bo_switch_commit(dev_priv, sw_context); | ||
1192 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, | ||
1193 | &fence, | ||
1194 | (user_fence_rep) ? &handle : NULL); | ||
683 | /* | 1195 | /* |
684 | * This error is harmless, because if fence submission fails, | 1196 | * This error is harmless, because if fence submission fails, |
685 | * vmw_fifo_send_fence will sync. | 1197 | * vmw_fifo_send_fence will sync. The error will be propagated to |
1198 | * user-space in @fence_rep | ||
686 | */ | 1199 | */ |
687 | 1200 | ||
688 | if (ret != 0) | 1201 | if (ret != 0) |
689 | DRM_ERROR("Fence submission error. Syncing.\n"); | 1202 | DRM_ERROR("Fence submission error. Syncing.\n"); |
690 | 1203 | ||
691 | fence_rep.error = ret; | 1204 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, |
692 | fence_rep.fence_seq = (uint64_t) sequence; | 1205 | (void *) fence); |
693 | fence_rep.pad64 = 0; | ||
694 | |||
695 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | ||
696 | (unsigned long)arg->fence_rep; | ||
697 | 1206 | ||
698 | /* | 1207 | vmw_clear_validations(sw_context); |
699 | * copy_to_user errors will be detected by user space not | 1208 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, |
700 | * seeing fence_rep::error filled in. | 1209 | user_fence_rep, fence, handle); |
701 | */ | ||
702 | 1210 | ||
703 | ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep)); | 1211 | if (likely(fence != NULL)) |
1212 | vmw_fence_obj_unreference(&fence); | ||
704 | 1213 | ||
705 | vmw_kms_cursor_post_execbuf(dev_priv); | 1214 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
706 | ttm_read_unlock(&vmaster->lock); | ||
707 | return 0; | 1215 | return 0; |
1216 | |||
708 | out_err: | 1217 | out_err: |
709 | vmw_free_relocations(sw_context); | 1218 | vmw_free_relocations(sw_context); |
1219 | out_throttle: | ||
1220 | vmw_query_switch_backoff(sw_context); | ||
710 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); | 1221 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); |
711 | vmw_clear_validations(sw_context); | 1222 | vmw_clear_validations(sw_context); |
712 | out_commit: | ||
713 | vmw_fifo_commit(dev_priv, 0); | ||
714 | out_unlock: | 1223 | out_unlock: |
715 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1224 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
716 | out_no_cmd_mutex: | 1225 | return ret; |
1226 | } | ||
1227 | |||
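Seen end to end, the new vmw_execbuf_process() runs every submission through a fixed pipeline; a hedged outline of the happy path, with error unwinding elided:

/*
 * 1. bounce/copy the stream into kernel memory (user submissions only)
 * 2. vmw_cmd_check_all()      - verify commands, collect BOs/resources
 * 3. ttm_eu_reserve_buffers() - lock everything on the validate list
 * 4. validate + relocations   - place buffers, patch GMR pointers
 * 5. throttle, fifo reserve, memcpy, fifo commit
 * 6. query switch commit, fence creation, fence the buffer objects
 * 7. copy fence info to user-space, drop validations, unlock
 */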
1228 | /** | ||
1229 | * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. | ||
1230 | * | ||
1231 | * @dev_priv: The device private structure. | ||
1232 | * | ||
1233 | * This function is called to idle the fifo and unpin the query buffer | ||
1234 | * if the normal way to do this hits an error, which should typically be | ||
1235 | * extremely rare. | ||
1236 | */ | ||
1237 | static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | ||
1238 | { | ||
1239 | DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); | ||
1240 | |||
1241 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); | ||
1242 | vmw_bo_pin(dev_priv->pinned_bo, false); | ||
1243 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | ||
1244 | dev_priv->dummy_query_bo_pinned = false; | ||
1245 | } | ||
1246 | |||
1247 | |||
1248 | /** | ||
1249 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | ||
1250 | * query bo. | ||
1251 | * | ||
1252 | * @dev_priv: The device private structure. | ||
1253 | * @only_on_cid_match: Only flush and unpin if the current active query cid | ||
1254 | * matches @cid. | ||
1255 | * @cid: Optional context id to match. | ||
1256 | * | ||
1257 | * This function should be used to unpin the pinned query bo, or | ||
1258 | * as a query barrier when we need to make sure that all queries have | ||
1259 | * finished before the next fifo command. (For example on hardware | ||
1260 | * context destructions where the hardware may otherwise leak unfinished | ||
1261 | * queries). | ||
1262 | * | ||
1263 | * This function does not return any failure codes, but makes attempts | ||
1264 | * to do safe unpinning in case of errors. | ||
1265 | * | ||
1266 | * The function will synchronize on the previous query barrier, and will | ||
1267 | * thus not finish until that barrier has executed. | ||
1268 | */ | ||
1269 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | ||
1270 | bool only_on_cid_match, uint32_t cid) | ||
1271 | { | ||
1272 | int ret = 0; | ||
1273 | struct list_head validate_list; | ||
1274 | struct ttm_validate_buffer pinned_val, query_val; | ||
1275 | struct vmw_fence_obj *fence; | ||
1276 | |||
1277 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1278 | |||
1279 | if (dev_priv->pinned_bo == NULL) | ||
1280 | goto out_unlock; | ||
1281 | |||
1282 | if (only_on_cid_match && cid != dev_priv->query_cid) | ||
1283 | goto out_unlock; | ||
1284 | |||
1285 | INIT_LIST_HEAD(&validate_list); | ||
1286 | |||
1287 | pinned_val.new_sync_obj_arg = (void *)(unsigned long) | ||
1288 | DRM_VMW_FENCE_FLAG_EXEC; | ||
1289 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); | ||
1290 | list_add_tail(&pinned_val.head, &validate_list); | ||
1291 | |||
1292 | query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg; | ||
1293 | query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); | ||
1294 | list_add_tail(&query_val.head, &validate_list); | ||
1295 | |||
1296 | do { | ||
1297 | ret = ttm_eu_reserve_buffers(&validate_list); | ||
1298 | } while (ret == -ERESTARTSYS); | ||
1299 | |||
1300 | if (unlikely(ret != 0)) { | ||
1301 | vmw_execbuf_unpin_panic(dev_priv); | ||
1302 | goto out_no_reserve; | ||
1303 | } | ||
1304 | |||
1305 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); | ||
1306 | if (unlikely(ret != 0)) { | ||
1307 | vmw_execbuf_unpin_panic(dev_priv); | ||
1308 | goto out_no_emit; | ||
1309 | } | ||
1310 | |||
1311 | vmw_bo_pin(dev_priv->pinned_bo, false); | ||
1312 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | ||
1313 | dev_priv->dummy_query_bo_pinned = false; | ||
1314 | |||
1315 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||
1316 | ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); | ||
1317 | |||
1318 | ttm_bo_unref(&query_val.bo); | ||
1319 | ttm_bo_unref(&pinned_val.bo); | ||
1320 | ttm_bo_unref(&dev_priv->pinned_bo); | ||
1321 | |||
1322 | out_unlock: | ||
1323 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1324 | return; | ||
1325 | |||
1326 | out_no_emit: | ||
1327 | ttm_eu_backoff_reservation(&validate_list); | ||
1328 | out_no_reserve: | ||
1329 | ttm_bo_unref(&query_val.bo); | ||
1330 | ttm_bo_unref(&pinned_val.bo); | ||
1331 | ttm_bo_unref(&dev_priv->pinned_bo); | ||
1332 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1333 | } | ||
1334 | |||
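One detail in the function above deserves a note: the reservation is retried in a loop because ttm_eu_reserve_buffers() can be interrupted by a signal, and this path, unlike an ioctl, has no user-space caller to restart it. The retry shape in isolation, as a hedged sketch where the reserve callback stands in for any interruptible reservation:

#include <errno.h>

static int reserve_no_restart(int (*reserve)(void *), void *list)
{
	int ret;

	do {
		ret = reserve(list);	/* interrupted by a signal? */
	} while (ret == -ERESTARTSYS);	/* then simply try again */

	return ret;			/* real errors still propagate */
}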
1335 | |||
1336 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | ||
1337 | struct drm_file *file_priv) | ||
1338 | { | ||
1339 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1340 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | ||
1341 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1342 | int ret; | ||
1343 | |||
1344 | /* | ||
1345 | * This will allow us to extend the ioctl argument while | ||
1346 | * maintaining backwards compatibility: | ||
1347 | * We take different code paths depending on the value of | ||
1348 | * arg->version. | ||
1349 | */ | ||
1350 | |||
1351 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { | ||
1352 | DRM_ERROR("Incorrect execbuf version.\n"); | ||
1353 | DRM_ERROR("You're running outdated experimental " | ||
1354 | "vmwgfx user-space drivers.\n"); | ||
1355 | return -EINVAL; | ||
1356 | } | ||
1357 | |||
1358 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1359 | if (unlikely(ret != 0)) | ||
1360 | return ret; | ||
1361 | |||
1362 | ret = vmw_execbuf_process(file_priv, dev_priv, | ||
1363 | (void __user *)(unsigned long)arg->commands, | ||
1364 | NULL, arg->command_size, arg->throttle_us, | ||
1365 | (void __user *)(unsigned long)arg->fence_rep); | ||
1366 | |||
1367 | if (unlikely(ret != 0)) | ||
1368 | goto out_unlock; | ||
1369 | |||
1370 | vmw_kms_cursor_post_execbuf(dev_priv); | ||
1371 | |||
1372 | out_unlock: | ||
717 | ttm_read_unlock(&vmaster->lock); | 1373 | ttm_read_unlock(&vmaster->lock); |
718 | return ret; | 1374 | return ret; |
719 | } | 1375 | } |
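The version check above is the standard forward-compatibility gate for an extensible ioctl: rather than guessing at an unknown user-space struct layout, the kernel refuses anything it was not built to parse. The same gate in a generic, hedged form; MY_ABI_VERSION and struct my_arg are illustrative, not the vmwgfx uapi:

#include <errno.h>

#define MY_ABI_VERSION 1	/* bumped whenever struct my_arg grows */

struct my_arg {
	unsigned int version;	/* must stay first and never move */
	/* ... versioned payload ... */
};

static int check_args(const struct my_arg *arg)
{
	if (arg->version != MY_ABI_VERSION)
		return -EINVAL;	/* unknown layout: refuse to parse it */
	return 0;
}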
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index bfab60c938ac..070797b7b03a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -158,10 +158,14 @@ static int vmw_fb_set_par(struct fb_info *info) | |||
158 | { | 158 | { |
159 | struct vmw_fb_par *par = info->par; | 159 | struct vmw_fb_par *par = info->par; |
160 | struct vmw_private *vmw_priv = par->vmw_priv; | 160 | struct vmw_private *vmw_priv = par->vmw_priv; |
161 | int ret; | ||
162 | |||
163 | ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, | ||
164 | info->fix.line_length, | ||
165 | par->bpp, par->depth); | ||
166 | if (ret) | ||
167 | return ret; | ||
161 | 168 | ||
162 | vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, | ||
163 | info->fix.line_length, | ||
164 | par->bpp, par->depth); | ||
165 | if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { | 169 | if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { |
166 | /* TODO check if pitch and offset changes */ | 170 | /* TODO check if pitch and offset changes */ |
167 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | 171 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); |
@@ -405,14 +409,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
405 | struct fb_info *info; | 409 | struct fb_info *info; |
406 | unsigned initial_width, initial_height; | 410 | unsigned initial_width, initial_height; |
407 | unsigned fb_width, fb_height; | 411 | unsigned fb_width, fb_height; |
408 | unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; | 412 | unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size; |
409 | int ret; | 413 | int ret; |
410 | 414 | ||
411 | /* XXX These shouldn't be hardcoded. */ | 415 | /* XXX These shouldn't be hardcoded. */ |
412 | initial_width = 800; | 416 | initial_width = 800; |
413 | initial_height = 600; | 417 | initial_height = 600; |
414 | 418 | ||
415 | fb_bbp = 32; | 419 | fb_bpp = 32; |
416 | fb_depth = 24; | 420 | fb_depth = 24; |
417 | 421 | ||
418 | /* XXX As shouldn't these be as well. */ | 422 | /* XXX As shouldn't these be as well. */ |
@@ -422,7 +426,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
422 | initial_width = min(fb_width, initial_width); | 426 | initial_width = min(fb_width, initial_width); |
423 | initial_height = min(fb_height, initial_height); | 427 | initial_height = min(fb_height, initial_height); |
424 | 428 | ||
425 | fb_pitch = fb_width * fb_bbp / 8; | 429 | fb_pitch = fb_width * fb_bpp / 8; |
426 | fb_size = fb_pitch * fb_height; | 430 | fb_size = fb_pitch * fb_height; |
427 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); | 431 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); |
428 | 432 | ||
@@ -437,7 +441,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
437 | par = info->par; | 441 | par = info->par; |
438 | par->vmw_priv = vmw_priv; | 442 | par->vmw_priv = vmw_priv; |
439 | par->depth = fb_depth; | 443 | par->depth = fb_depth; |
440 | par->bpp = fb_bbp; | 444 | par->bpp = fb_bpp; |
441 | par->vmalloc = NULL; | 445 | par->vmalloc = NULL; |
442 | par->max_width = fb_width; | 446 | par->max_width = fb_width; |
443 | par->max_height = fb_height; | 447 | par->max_height = fb_height; |
@@ -588,58 +592,6 @@ int vmw_fb_close(struct vmw_private *vmw_priv) | |||
588 | return 0; | 592 | return 0; |
589 | } | 593 | } |
590 | 594 | ||
591 | int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | ||
592 | struct vmw_dma_buffer *vmw_bo) | ||
593 | { | ||
594 | struct ttm_buffer_object *bo = &vmw_bo->base; | ||
595 | int ret = 0; | ||
596 | |||
597 | ret = ttm_bo_reserve(bo, false, false, false, 0); | ||
598 | if (unlikely(ret != 0)) | ||
599 | return ret; | ||
600 | |||
601 | ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); | ||
602 | ttm_bo_unreserve(bo); | ||
603 | |||
604 | return ret; | ||
605 | } | ||
606 | |||
607 | int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
608 | struct vmw_dma_buffer *vmw_bo) | ||
609 | { | ||
610 | struct ttm_buffer_object *bo = &vmw_bo->base; | ||
611 | struct ttm_placement ne_placement = vmw_vram_ne_placement; | ||
612 | int ret = 0; | ||
613 | |||
614 | ne_placement.lpfn = bo->num_pages; | ||
615 | |||
616 | /* interruptible? */ | ||
617 | ret = ttm_write_lock(&vmw_priv->active_master->lock, false); | ||
618 | if (unlikely(ret != 0)) | ||
619 | return ret; | ||
620 | |||
621 | ret = ttm_bo_reserve(bo, false, false, false, 0); | ||
622 | if (unlikely(ret != 0)) | ||
623 | goto err_unlock; | ||
624 | |||
625 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
626 | bo->mem.start < bo->num_pages && | ||
627 | bo->mem.start > 0) | ||
628 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
629 | false, false); | ||
630 | |||
631 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | ||
632 | |||
633 | /* Could probably bug on */ | ||
634 | WARN_ON(bo->offset != 0); | ||
635 | |||
636 | ttm_bo_unreserve(bo); | ||
637 | err_unlock: | ||
638 | ttm_write_unlock(&vmw_priv->active_master->lock); | ||
639 | |||
640 | return ret; | ||
641 | } | ||
642 | |||
643 | int vmw_fb_off(struct vmw_private *vmw_priv) | 595 | int vmw_fb_off(struct vmw_private *vmw_priv) |
644 | { | 596 | { |
645 | struct fb_info *info; | 597 | struct fb_info *info; |
@@ -661,7 +613,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv) | |||
661 | par->bo_ptr = NULL; | 613 | par->bo_ptr = NULL; |
662 | ttm_bo_kunmap(&par->map); | 614 | ttm_bo_kunmap(&par->map); |
663 | 615 | ||
664 | vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo); | 616 | vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false); |
665 | 617 | ||
666 | return 0; | 618 | return 0; |
667 | } | 619 | } |
@@ -687,7 +639,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv) | |||
687 | /* Make sure that all overlays are stopped when we take over */ | 639 | /* Make sure that all overlays are stopped when we take over */
688 | vmw_overlay_stop_all(vmw_priv); | 640 | vmw_overlay_stop_all(vmw_priv); |
689 | 641 | ||
690 | ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo); | 642 | ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false); |
691 | if (unlikely(ret != 0)) { | 643 | if (unlikely(ret != 0)) { |
692 | DRM_ERROR("could not move buffer to start of VRAM\n"); | 644 | DRM_ERROR("could not move buffer to start of VRAM\n"); |
693 | goto err_no_buffer; | 645 | goto err_no_buffer; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 61eacc1b5ca3..15fb26088d68 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA | 3 | * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -25,149 +25,1100 @@ | |||
25 | * | 25 | * |
26 | **************************************************************************/ | 26 | **************************************************************************/ |
27 | 27 | ||
28 | 28 | #include "drmP.h" | |
29 | #include "vmwgfx_drv.h" | 29 | #include "vmwgfx_drv.h" |
30 | 30 | ||
31 | struct vmw_fence { | 31 | #define VMW_FENCE_WRAP (1 << 31) |
32 | struct list_head head; | 32 | |
33 | uint32_t sequence; | 33 | struct vmw_fence_manager { |
34 | struct timespec submitted; | 34 | int num_fence_objects; |
35 | struct vmw_private *dev_priv; | ||
36 | spinlock_t lock; | ||
37 | struct list_head fence_list; | ||
38 | struct work_struct work; | ||
39 | u32 user_fence_size; | ||
40 | u32 fence_size; | ||
41 | u32 event_fence_action_size; | ||
42 | bool fifo_down; | ||
43 | struct list_head cleanup_list; | ||
44 | uint32_t pending_actions[VMW_ACTION_MAX]; | ||
45 | struct mutex goal_irq_mutex; | ||
46 | bool goal_irq_on; /* Protected by @goal_irq_mutex */ | ||
47 | bool seqno_valid; /* Protected by @lock, and may not be set to true | ||
48 | without the @goal_irq_mutex held. */ | ||
35 | }; | 49 | }; |
36 | 50 | ||
37 | void vmw_fence_queue_init(struct vmw_fence_queue *queue) | 51 | struct vmw_user_fence { |
52 | struct ttm_base_object base; | ||
53 | struct vmw_fence_obj fence; | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * struct vmw_event_fence_action - fence action that delivers a drm event. | ||
58 | * | ||
59 | * @e: A struct drm_pending_event that controls the event delivery. | ||
60 | * @action: A struct vmw_fence_action to hook up to a fence. | ||
61 | * @fence: A referenced pointer to the fence to keep it alive while @action | ||
62 | * hangs on it. | ||
63 | * @dev: Pointer to a struct drm_device so we can access the event stuff. | ||
64 | * @kref: Both @e and @action has destructors, so we need to refcount. | ||
65 | * @size: Size accounted for this object. | ||
66 | * @tv_sec: If non-null, the variable pointed to will be assigned | ||
67 | * current time tv_sec val when the fence signals. | ||
68 | * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will | ||
69 | * be assigned the current time tv_usec val when the fence signals. | ||
70 | */ | ||
71 | struct vmw_event_fence_action { | ||
72 | struct drm_pending_event e; | ||
73 | struct vmw_fence_action action; | ||
74 | struct vmw_fence_obj *fence; | ||
75 | struct drm_device *dev; | ||
76 | struct kref kref; | ||
77 | uint32_t size; | ||
78 | uint32_t *tv_sec; | ||
79 | uint32_t *tv_usec; | ||
80 | }; | ||
81 | |||
82 | /** | ||
83 | * Note on fencing subsystem usage of irqs: | ||
84 | * Typically the vmw_fences_update function is called | ||
85 | * | ||
86 | * a) When a new fence seqno has been submitted by the fifo code. | ||
87 | * b) On-demand when we have waiters. Sleeping waiters will switch on the | ||
88 | * ANY_FENCE irq and call the vmw_fences_update function each time an ANY_FENCE | ||
89 | * irq is received. When the last fence waiter is gone, that IRQ is masked | ||
90 | * away. | ||
91 | * | ||
92 | * In situations where there are no waiters and we don't submit any new fences, | ||
93 | * fence objects may not be signaled. This is perfectly OK, since there are | ||
94 | * no consumers of the signaled data, but that is NOT ok when there are fence | ||
95 | * actions attached to a fence. The fencing subsystem then makes use of the | ||
96 | * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence | ||
97 | * which has an action attached, and each time vmw_fences_update is called, | ||
98 | * the subsystem makes sure the fence goal seqno is updated. | ||
99 | * | ||
100 | * The fence goal seqno irq is on as long as there are unsignaled fence | ||
101 | * objects with actions attached to them. | ||
102 | */ | ||
103 | |||
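The on-demand policy in the note above reduces to a waiter refcount: the first waiter switches the ANY_FENCE irq on, the last one masks it again, and in between every irq drives vmw_fences_update(). A hedged sketch of that refcount; the enable/disable helpers are hypothetical stand-ins for the driver's waiter add/remove calls, and the count is lock-protected in real code:

static void enable_any_fence_irq(void)  { /* unmask in hw, stand-in */ }
static void disable_any_fence_irq(void) { /* mask in hw, stand-in */ }

static unsigned int waiters;	/* protected by a spinlock in reality */

static void waiter_add(void)
{
	if (waiters++ == 0)
		enable_any_fence_irq();		/* first waiter: irq on */
}

static void waiter_remove(void)
{
	if (--waiters == 0)
		disable_any_fence_irq();	/* last waiter: mask it */
}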
104 | static void vmw_fence_obj_destroy_locked(struct kref *kref) | ||
105 | { | ||
106 | struct vmw_fence_obj *fence = | ||
107 | container_of(kref, struct vmw_fence_obj, kref); | ||
108 | |||
109 | struct vmw_fence_manager *fman = fence->fman; | ||
110 | unsigned int num_fences; | ||
111 | |||
112 | list_del_init(&fence->head); | ||
113 | num_fences = --fman->num_fence_objects; | ||
114 | spin_unlock_irq(&fman->lock); | ||
115 | if (fence->destroy) | ||
116 | fence->destroy(fence); | ||
117 | else | ||
118 | kfree(fence); | ||
119 | |||
120 | spin_lock_irq(&fman->lock); | ||
121 | } | ||
122 | |||
123 | |||
124 | /** | ||
125 | * Execute signal actions on fences recently signaled. | ||
126 | * This is done from a workqueue so we don't have to execute | ||
127 | * signal actions from atomic context. | ||
128 | */ | ||
129 | |||
130 | static void vmw_fence_work_func(struct work_struct *work) | ||
38 | { | 131 | { |
39 | INIT_LIST_HEAD(&queue->head); | 132 | struct vmw_fence_manager *fman = |
40 | queue->lag = ns_to_timespec(0); | 133 | container_of(work, struct vmw_fence_manager, work); |
41 | getrawmonotonic(&queue->lag_time); | 134 | struct list_head list; |
42 | spin_lock_init(&queue->lock); | 135 | struct vmw_fence_action *action, *next_action; |
136 | bool seqno_valid; | ||
137 | |||
138 | do { | ||
139 | INIT_LIST_HEAD(&list); | ||
140 | mutex_lock(&fman->goal_irq_mutex); | ||
141 | |||
142 | spin_lock_irq(&fman->lock); | ||
143 | list_splice_init(&fman->cleanup_list, &list); | ||
144 | seqno_valid = fman->seqno_valid; | ||
145 | spin_unlock_irq(&fman->lock); | ||
146 | |||
147 | if (!seqno_valid && fman->goal_irq_on) { | ||
148 | fman->goal_irq_on = false; | ||
149 | vmw_goal_waiter_remove(fman->dev_priv); | ||
150 | } | ||
151 | mutex_unlock(&fman->goal_irq_mutex); | ||
152 | |||
153 | if (list_empty(&list)) | ||
154 | return; | ||
155 | |||
156 | /* | ||
157 | * At this point, only we should be able to manipulate the | ||
158 | * list heads of the actions we have on the private list. | ||
159 | * hence fman::lock not held. | ||
160 | */ | ||
161 | |||
162 | list_for_each_entry_safe(action, next_action, &list, head) { | ||
163 | list_del_init(&action->head); | ||
164 | if (action->cleanup) | ||
165 | action->cleanup(action); | ||
166 | } | ||
167 | } while (1); | ||
43 | } | 168 | } |
44 | 169 | ||
45 | void vmw_fence_queue_takedown(struct vmw_fence_queue *queue) | 170 | struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) |
46 | { | 171 | { |
47 | struct vmw_fence *fence, *next; | 172 | struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); |
48 | 173 | ||
49 | spin_lock(&queue->lock); | 174 | if (unlikely(fman == NULL)) |
50 | list_for_each_entry_safe(fence, next, &queue->head, head) { | 175 | return NULL; |
51 | kfree(fence); | 176 | |
52 | } | 177 | fman->dev_priv = dev_priv; |
53 | spin_unlock(&queue->lock); | 178 | spin_lock_init(&fman->lock); |
179 | INIT_LIST_HEAD(&fman->fence_list); | ||
180 | INIT_LIST_HEAD(&fman->cleanup_list); | ||
181 | INIT_WORK(&fman->work, &vmw_fence_work_func); | ||
182 | fman->fifo_down = true; | ||
183 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); | ||
184 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); | ||
185 | fman->event_fence_action_size = | ||
186 | ttm_round_pot(sizeof(struct vmw_event_fence_action)); | ||
187 | mutex_init(&fman->goal_irq_mutex); | ||
188 | |||
189 | return fman; | ||
54 | } | 190 | } |
55 | 191 | ||
56 | int vmw_fence_push(struct vmw_fence_queue *queue, | 192 | void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) |
57 | uint32_t sequence) | ||
58 | { | 193 | { |
59 | struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); | 194 | unsigned long irq_flags; |
195 | bool lists_empty; | ||
60 | 196 | ||
61 | if (unlikely(!fence)) | 197 | (void) cancel_work_sync(&fman->work); |
62 | return -ENOMEM; | ||
63 | 198 | ||
64 | fence->sequence = sequence; | 199 | spin_lock_irqsave(&fman->lock, irq_flags); |
65 | getrawmonotonic(&fence->submitted); | 200 | lists_empty = list_empty(&fman->fence_list) && |
66 | spin_lock(&queue->lock); | 201 | list_empty(&fman->cleanup_list); |
67 | list_add_tail(&fence->head, &queue->head); | 202 | spin_unlock_irqrestore(&fman->lock, irq_flags); |
68 | spin_unlock(&queue->lock); | ||
69 | 203 | ||
70 | return 0; | 204 | BUG_ON(!lists_empty); |
205 | kfree(fman); | ||
71 | } | 206 | } |
72 | 207 | ||
73 | int vmw_fence_pull(struct vmw_fence_queue *queue, | 208 | static int vmw_fence_obj_init(struct vmw_fence_manager *fman, |
74 | uint32_t signaled_sequence) | 209 | struct vmw_fence_obj *fence, |
210 | u32 seqno, | ||
211 | uint32_t mask, | ||
212 | void (*destroy) (struct vmw_fence_obj *fence)) | ||
75 | { | 213 | { |
76 | struct vmw_fence *fence, *next; | 214 | unsigned long irq_flags; |
77 | struct timespec now; | 215 | unsigned int num_fences; |
78 | bool updated = false; | 216 | int ret = 0; |
79 | 217 | ||
80 | spin_lock(&queue->lock); | 218 | fence->seqno = seqno; |
81 | getrawmonotonic(&now); | 219 | INIT_LIST_HEAD(&fence->seq_passed_actions); |
220 | fence->fman = fman; | ||
221 | fence->signaled = 0; | ||
222 | fence->signal_mask = mask; | ||
223 | kref_init(&fence->kref); | ||
224 | fence->destroy = destroy; | ||
225 | init_waitqueue_head(&fence->queue); | ||
82 | 226 | ||
83 | if (list_empty(&queue->head)) { | 227 | spin_lock_irqsave(&fman->lock, irq_flags); |
84 | queue->lag = ns_to_timespec(0); | 228 | if (unlikely(fman->fifo_down)) { |
85 | queue->lag_time = now; | 229 | ret = -EBUSY; |
86 | updated = true; | ||
87 | goto out_unlock; | 230 | goto out_unlock; |
88 | } | 231 | } |
232 | list_add_tail(&fence->head, &fman->fence_list); | ||
233 | num_fences = ++fman->num_fence_objects; | ||
89 | 234 | ||
90 | list_for_each_entry_safe(fence, next, &queue->head, head) { | 235 | out_unlock: |
91 | if (signaled_sequence - fence->sequence > (1 << 30)) | 236 | spin_unlock_irqrestore(&fman->lock, irq_flags); |
92 | continue; | 237 | return ret; |
93 | 238 | ||
94 | queue->lag = timespec_sub(now, fence->submitted); | 239 | } |
95 | queue->lag_time = now; | 240 | |
96 | updated = true; | 241 | struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence) |
97 | list_del(&fence->head); | 242 | { |
98 | kfree(fence); | 243 | if (unlikely(fence == NULL)) |
244 | return NULL; | ||
245 | |||
246 | kref_get(&fence->kref); | ||
247 | return fence; | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * vmw_fence_obj_unreference | ||
252 | * | ||
253 | * Note that this function must not be called with irqs disabled, since | ||
254 | * the destroy callback may re-enable them. | ||
255 | * | ||
256 | */ | ||
257 | void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) | ||
258 | { | ||
259 | struct vmw_fence_obj *fence = *fence_p; | ||
260 | struct vmw_fence_manager *fman; | ||
261 | |||
262 | if (unlikely(fence == NULL)) | ||
263 | return; | ||
264 | |||
265 | fman = fence->fman; | ||
266 | *fence_p = NULL; | ||
267 | spin_lock_irq(&fman->lock); | ||
268 | BUG_ON(atomic_read(&fence->kref.refcount) == 0); | ||
269 | kref_put(&fence->kref, vmw_fence_obj_destroy_locked); | ||
270 | spin_unlock_irq(&fman->lock); | ||
271 | } | ||
272 | |||
273 | void vmw_fences_perform_actions(struct vmw_fence_manager *fman, | ||
274 | struct list_head *list) | ||
275 | { | ||
276 | struct vmw_fence_action *action, *next_action; | ||
277 | |||
278 | list_for_each_entry_safe(action, next_action, list, head) { | ||
279 | list_del_init(&action->head); | ||
280 | fman->pending_actions[action->type]--; | ||
281 | if (action->seq_passed != NULL) | ||
282 | action->seq_passed(action); | ||
283 | |||
284 | /* | ||
285 | * Add the cleanup action to the cleanup list so that | ||
286 | * it will be performed by a worker task. | ||
287 | */ | ||
288 | |||
289 | list_add_tail(&action->head, &fman->cleanup_list); | ||
99 | } | 290 | } |
291 | } | ||
292 | |||
293 | /** | ||
294 | * vmw_fence_goal_new_locked - Figure out a new device fence goal | ||
295 | * seqno if needed. | ||
296 | * | ||
297 | * @fman: Pointer to a fence manager. | ||
298 | * @passed_seqno: The seqno the device currently signals as passed. | ||
299 | * | ||
300 | * This function should be called with the fence manager lock held. | ||
301 | * It is typically called when we have a new passed_seqno, and | ||
302 | * we might need to update the fence goal. It checks to see whether | ||
303 | * the current fence goal has already passed, and, in that case, | ||
304 | * scans through all unsignaled fences to get the next fence object with an | ||
305 | * action attached, and sets the seqno of that fence as a new fence goal. | ||
306 | * | ||
307 | * Returns true if the device goal seqno was updated, false otherwise. | ||
308 | */ | ||
309 | static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, | ||
310 | u32 passed_seqno) | ||
311 | { | ||
312 | u32 goal_seqno; | ||
313 | __le32 __iomem *fifo_mem; | ||
314 | struct vmw_fence_obj *fence; | ||
315 | |||
316 | if (likely(!fman->seqno_valid)) | ||
317 | return false; | ||
318 | |||
319 | fifo_mem = fman->dev_priv->mmio_virt; | ||
320 | goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL); | ||
321 | if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) | ||
322 | return false; | ||
323 | |||
324 | fman->seqno_valid = false; | ||
325 | list_for_each_entry(fence, &fman->fence_list, head) { | ||
326 | if (!list_empty(&fence->seq_passed_actions)) { | ||
327 | fman->seqno_valid = true; | ||
328 | iowrite32(fence->seqno, | ||
329 | fifo_mem + SVGA_FIFO_FENCE_GOAL); | ||
330 | break; | ||
331 | } | ||
332 | } | ||
333 | |||
334 | return true; | ||
335 | } | ||
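
Both goal helpers, and vmw_fences_update() below, compare seqnos with expressions such as (passed_seqno - goal_seqno) >= VMW_FENCE_WRAP: unsigned 32-bit subtraction wraps modulo 2^32, so a seqno counts as passed when it trails the current counter by less than the wrap window. A standalone illustration of that idiom (the window value here is an assumption; the driver defines its own VMW_FENCE_WRAP):

#include <stdbool.h>
#include <stdint.h>

#define FENCE_WRAP (1u << 24)	/* assumed window size */

/* True if @seqno has passed, given the last counter value read. */
static bool seqno_passed(uint32_t current_seqno, uint32_t seqno)
{
	/* Unsigned subtraction wraps, so this also holds across 2^32. */
	return (uint32_t)(current_seqno - seqno) < FENCE_WRAP;
}

/*
 * Example: current 0x00000002, seqno 0xfffffffe: the difference wraps
 * to 4, so the fence is correctly reported as passed.
 */
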
100 | 336 | ||
101 | out_unlock: | ||
102 | spin_unlock(&queue->lock); | ||
103 | 337 | ||
104 | return (updated) ? 0 : -EBUSY; | 338 | /** |
339 | * vmw_fence_goal_check_locked - Replace the device fence goal seqno if | ||
340 | * needed. | ||
341 | * | ||
342 | * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be | ||
343 | * considered as a device fence goal. | ||
344 | * | ||
345 | * This function should be called with the fence manager lock held. | ||
346 | * It is typically called when an action has been attached to a fence to | ||
347 | * check whether the seqno of that fence should be used for a fence | ||
348 | * goal interrupt. This is typically needed if the current fence goal is | ||
349 | * invalid, or has a higher seqno than that of the current fence object. | ||
350 | * | ||
351 | * Returns true if the device goal seqno was updated, false otherwise. | ||
352 | */ | ||
353 | static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) | ||
354 | { | ||
355 | u32 goal_seqno; | ||
356 | __le32 __iomem *fifo_mem; | ||
357 | |||
358 | if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) | ||
359 | return false; | ||
360 | |||
361 | fifo_mem = fence->fman->dev_priv->mmio_virt; | ||
362 | goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL); | ||
363 | if (likely(fence->fman->seqno_valid && | ||
364 | goal_seqno - fence->seqno < VMW_FENCE_WRAP)) | ||
365 | return false; | ||
366 | |||
367 | iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL); | ||
368 | fence->fman->seqno_valid = true; | ||
369 | |||
370 | return true; | ||
105 | } | 371 | } |
106 | 372 | ||
107 | static struct timespec vmw_timespec_add(struct timespec t1, | 373 | void vmw_fences_update(struct vmw_fence_manager *fman) |
108 | struct timespec t2) | ||
109 | { | 374 | { |
110 | t1.tv_sec += t2.tv_sec; | 375 | unsigned long flags; |
111 | t1.tv_nsec += t2.tv_nsec; | 376 | struct vmw_fence_obj *fence, *next_fence; |
112 | if (t1.tv_nsec >= 1000000000L) { | 377 | struct list_head action_list; |
113 | t1.tv_sec += 1; | 378 | bool needs_rerun; |
114 | t1.tv_nsec -= 1000000000L; | 379 | uint32_t seqno, new_seqno; |
380 | __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt; | ||
381 | |||
382 | seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
383 | rerun: | ||
384 | spin_lock_irqsave(&fman->lock, flags); | ||
385 | list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { | ||
386 | if (seqno - fence->seqno < VMW_FENCE_WRAP) { | ||
387 | list_del_init(&fence->head); | ||
388 | fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC; | ||
389 | INIT_LIST_HEAD(&action_list); | ||
390 | list_splice_init(&fence->seq_passed_actions, | ||
391 | &action_list); | ||
392 | vmw_fences_perform_actions(fman, &action_list); | ||
393 | wake_up_all(&fence->queue); | ||
394 | } else | ||
395 | break; | ||
115 | } | 396 | } |
116 | 397 | ||
117 | return t1; | 398 | needs_rerun = vmw_fence_goal_new_locked(fman, seqno); |
399 | |||
400 | if (!list_empty(&fman->cleanup_list)) | ||
401 | (void) schedule_work(&fman->work); | ||
402 | spin_unlock_irqrestore(&fman->lock, flags); | ||
403 | |||
404 | /* | ||
405 | * Rerun if the fence goal seqno was updated, and the | ||
406 | * hardware might have raced with that update, so that | ||
407 | * we missed a fence_goal irq. | ||
408 | */ | ||
409 | |||
410 | if (unlikely(needs_rerun)) { | ||
411 | new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
412 | if (new_seqno != seqno) { | ||
413 | seqno = new_seqno; | ||
414 | goto rerun; | ||
415 | } | ||
416 | } | ||
417 | } | ||
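
vmw_fences_update() re-reads the device seqno whenever it wrote a new fence goal, because the hardware may have signaled past that goal before the write landed, in which case the goal irq would never fire. A compact sketch of this check-after-arm idiom; read_hw_seqno() and process_up_to() are hypothetical stand-ins:

#include <stdbool.h>
#include <stdint.h>

uint32_t read_hw_seqno(void);		/* hypothetical register read */
bool process_up_to(uint32_t seqno);	/* hypothetical; true if the goal moved */

static void fences_update(void)
{
	uint32_t seqno = read_hw_seqno();

	for (;;) {
		uint32_t new_seqno;

		if (!process_up_to(seqno))
			break;

		/* The device may have raced past the new goal; re-check. */
		new_seqno = read_hw_seqno();
		if (new_seqno == seqno)
			break;
		seqno = new_seqno;
	}
}
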
418 | |||
419 | bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, | ||
420 | uint32_t flags) | ||
421 | { | ||
422 | struct vmw_fence_manager *fman = fence->fman; | ||
423 | unsigned long irq_flags; | ||
424 | uint32_t signaled; | ||
425 | |||
426 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
427 | signaled = fence->signaled; | ||
428 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
429 | |||
430 | flags &= fence->signal_mask; | ||
431 | if ((signaled & flags) == flags) | ||
432 | return 1; | ||
433 | |||
434 | if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) | ||
435 | vmw_fences_update(fman); | ||
436 | |||
437 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
438 | signaled = fence->signaled; | ||
439 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
440 | |||
441 | return ((signaled & flags) == flags); | ||
118 | } | 442 | } |
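
vmw_fence_obj_signaled() is structured as a fast path and a slow path: a locked snapshot answers most queries cheaply, and only when the EXEC flag is still clear does it pull fresh seqnos from the device and read the state again. A sketch under that assumption, with hypothetical helpers:

#include <stdbool.h>
#include <stdint.h>

struct fence_state;					/* opaque in this sketch */
uint32_t locked_snapshot(struct fence_state *f);	/* hypothetical */
void refresh_from_device(struct fence_state *f);	/* hypothetical */

static bool fence_signaled(struct fence_state *f, uint32_t flags)
{
	/* Fast path: a snapshot taken under the manager lock. */
	if ((locked_snapshot(f) & flags) == flags)
		return true;

	/* Slow path: update from the device, then check once more. */
	refresh_from_device(f);
	return (locked_snapshot(f) & flags) == flags;
}
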
119 | 443 | ||
120 | static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue) | 444 | int vmw_fence_obj_wait(struct vmw_fence_obj *fence, |
445 | uint32_t flags, bool lazy, | ||
446 | bool interruptible, unsigned long timeout) | ||
121 | { | 447 | { |
122 | struct timespec now; | 448 | struct vmw_private *dev_priv = fence->fman->dev_priv; |
449 | long ret; | ||
450 | |||
451 | if (likely(vmw_fence_obj_signaled(fence, flags))) | ||
452 | return 0; | ||
453 | |||
454 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
455 | vmw_seqno_waiter_add(dev_priv); | ||
456 | |||
457 | if (interruptible) | ||
458 | ret = wait_event_interruptible_timeout | ||
459 | (fence->queue, | ||
460 | vmw_fence_obj_signaled(fence, flags), | ||
461 | timeout); | ||
462 | else | ||
463 | ret = wait_event_timeout | ||
464 | (fence->queue, | ||
465 | vmw_fence_obj_signaled(fence, flags), | ||
466 | timeout); | ||
467 | |||
468 | vmw_seqno_waiter_remove(dev_priv); | ||
469 | |||
470 | if (unlikely(ret == 0)) | ||
471 | ret = -EBUSY; | ||
472 | else if (likely(ret > 0)) | ||
473 | ret = 0; | ||
474 | |||
475 | return ret; | ||
476 | } | ||
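
The tail of vmw_fence_obj_wait() folds the standard wait_event_*_timeout() return convention (0 on timeout, remaining jiffies on success, negative on interruption) into the driver's 0 / -EBUSY / -ERESTARTSYS contract. Isolated, the mapping looks like this:

/* Fold a wait_event_*_timeout() result into an errno-style return. */
static int map_wait_result(long ret)
{
	if (ret == 0)		/* timed out; condition still false */
		return -EBUSY;
	if (ret > 0)		/* condition became true within the timeout */
		return 0;
	return (int)ret;	/* interrupted: typically -ERESTARTSYS */
}
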
477 | |||
478 | void vmw_fence_obj_flush(struct vmw_fence_obj *fence) | ||
479 | { | ||
480 | struct vmw_private *dev_priv = fence->fman->dev_priv; | ||
481 | |||
482 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
483 | } | ||
484 | |||
485 | static void vmw_fence_destroy(struct vmw_fence_obj *fence) | ||
486 | { | ||
487 | struct vmw_fence_manager *fman = fence->fman; | ||
488 | |||
489 | kfree(fence); | ||
490 | /* | ||
491 | * Free kernel space accounting. | ||
492 | */ | ||
493 | ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), | ||
494 | fman->fence_size); | ||
495 | } | ||
496 | |||
497 | int vmw_fence_create(struct vmw_fence_manager *fman, | ||
498 | uint32_t seqno, | ||
499 | uint32_t mask, | ||
500 | struct vmw_fence_obj **p_fence) | ||
501 | { | ||
502 | struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); | ||
503 | struct vmw_fence_obj *fence; | ||
504 | int ret; | ||
505 | |||
506 | ret = ttm_mem_global_alloc(mem_glob, fman->fence_size, | ||
507 | false, false); | ||
508 | if (unlikely(ret != 0)) | ||
509 | return ret; | ||
510 | |||
511 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); | ||
512 | if (unlikely(fence == NULL)) { | ||
513 | ret = -ENOMEM; | ||
514 | goto out_no_object; | ||
515 | } | ||
516 | |||
517 | ret = vmw_fence_obj_init(fman, fence, seqno, mask, | ||
518 | vmw_fence_destroy); | ||
519 | if (unlikely(ret != 0)) | ||
520 | goto out_err_init; | ||
521 | |||
522 | *p_fence = fence; | ||
523 | return 0; | ||
524 | |||
525 | out_err_init: | ||
526 | kfree(fence); | ||
527 | out_no_object: | ||
528 | ttm_mem_global_free(mem_glob, fman->fence_size); | ||
529 | return ret; | ||
530 | } | ||
531 | |||
532 | |||
533 | static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) | ||
534 | { | ||
535 | struct vmw_user_fence *ufence = | ||
536 | container_of(fence, struct vmw_user_fence, fence); | ||
537 | struct vmw_fence_manager *fman = fence->fman; | ||
538 | |||
539 | kfree(ufence); | ||
540 | /* | ||
541 | * Free kernel space accounting. | ||
542 | */ | ||
543 | ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), | ||
544 | fman->user_fence_size); | ||
545 | } | ||
546 | |||
547 | static void vmw_user_fence_base_release(struct ttm_base_object **p_base) | ||
548 | { | ||
549 | struct ttm_base_object *base = *p_base; | ||
550 | struct vmw_user_fence *ufence = | ||
551 | container_of(base, struct vmw_user_fence, base); | ||
552 | struct vmw_fence_obj *fence = &ufence->fence; | ||
553 | |||
554 | *p_base = NULL; | ||
555 | vmw_fence_obj_unreference(&fence); | ||
556 | } | ||
557 | |||
558 | int vmw_user_fence_create(struct drm_file *file_priv, | ||
559 | struct vmw_fence_manager *fman, | ||
560 | uint32_t seqno, | ||
561 | uint32_t mask, | ||
562 | struct vmw_fence_obj **p_fence, | ||
563 | uint32_t *p_handle) | ||
564 | { | ||
565 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
566 | struct vmw_user_fence *ufence; | ||
567 | struct vmw_fence_obj *tmp; | ||
568 | struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); | ||
569 | int ret; | ||
570 | |||
571 | /* | ||
572 | * Kernel memory space accounting, since this object may | ||
573 | * be created by a user-space request. | ||
574 | */ | ||
575 | |||
576 | ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size, | ||
577 | false, false); | ||
578 | if (unlikely(ret != 0)) | ||
579 | return ret; | ||
580 | |||
581 | ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); | ||
582 | if (unlikely(ufence == NULL)) { | ||
583 | ret = -ENOMEM; | ||
584 | goto out_no_object; | ||
585 | } | ||
586 | |||
587 | ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, | ||
588 | mask, vmw_user_fence_destroy); | ||
589 | if (unlikely(ret != 0)) { | ||
590 | kfree(ufence); | ||
591 | goto out_no_object; | ||
592 | } | ||
593 | |||
594 | /* | ||
595 | * The base object holds a reference which is freed in | ||
596 | * vmw_user_fence_base_release. | ||
597 | */ | ||
598 | tmp = vmw_fence_obj_reference(&ufence->fence); | ||
599 | ret = ttm_base_object_init(tfile, &ufence->base, false, | ||
600 | VMW_RES_FENCE, | ||
601 | &vmw_user_fence_base_release, NULL); | ||
123 | 602 | ||
124 | spin_lock(&queue->lock); | 603 | |
125 | getrawmonotonic(&now); | 604 | if (unlikely(ret != 0)) { |
126 | queue->lag = vmw_timespec_add(queue->lag, | 605 | /* |
127 | timespec_sub(now, queue->lag_time)); | 606 | * Free the base object's reference |
128 | queue->lag_time = now; | 607 | */ |
129 | spin_unlock(&queue->lock); | 608 | vmw_fence_obj_unreference(&tmp); |
130 | return queue->lag; | 609 | goto out_err; |
610 | } | ||
611 | |||
612 | *p_fence = &ufence->fence; | ||
613 | *p_handle = ufence->base.hash.key; | ||
614 | |||
615 | return 0; | ||
616 | out_err: | ||
617 | tmp = &ufence->fence; | ||
618 | vmw_fence_obj_unreference(&tmp); | ||
619 | out_no_object: | ||
620 | ttm_mem_global_free(mem_glob, fman->user_fence_size); | ||
621 | return ret; | ||
131 | } | 622 | } |
132 | 623 | ||
133 | 624 | ||
134 | static bool vmw_lag_lt(struct vmw_fence_queue *queue, | 625 | /** |
135 | uint32_t us) | 626 | * vmw_fence_fifo_down - signal all unsignaled fence objects. |
627 | */ | ||
628 | |||
629 | void vmw_fence_fifo_down(struct vmw_fence_manager *fman) | ||
630 | { | ||
631 | unsigned long irq_flags; | ||
632 | struct list_head action_list; | ||
633 | int ret; | ||
634 | |||
635 | /* | ||
636 | * The list may be altered while we traverse it, so always | ||
637 | * restart when we've released the fman->lock. | ||
638 | */ | ||
639 | |||
640 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
641 | fman->fifo_down = true; | ||
642 | while (!list_empty(&fman->fence_list)) { | ||
643 | struct vmw_fence_obj *fence = | ||
644 | list_entry(fman->fence_list.prev, struct vmw_fence_obj, | ||
645 | head); | ||
646 | kref_get(&fence->kref); | ||
647 | spin_unlock_irq(&fman->lock); | ||
648 | |||
649 | ret = vmw_fence_obj_wait(fence, fence->signal_mask, | ||
650 | false, false, | ||
651 | VMW_FENCE_WAIT_TIMEOUT); | ||
652 | |||
653 | if (unlikely(ret != 0)) { | ||
654 | list_del_init(&fence->head); | ||
655 | fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC; | ||
656 | INIT_LIST_HEAD(&action_list); | ||
657 | list_splice_init(&fence->seq_passed_actions, | ||
658 | &action_list); | ||
659 | vmw_fences_perform_actions(fman, &action_list); | ||
660 | wake_up_all(&fence->queue); | ||
661 | } | ||
662 | |||
663 | spin_lock_irq(&fman->lock); | ||
664 | |||
665 | BUG_ON(!list_empty(&fence->head)); | ||
666 | kref_put(&fence->kref, vmw_fence_obj_destroy_locked); | ||
667 | } | ||
668 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
669 | } | ||
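
vmw_fence_fifo_down() cannot keep fman::lock across the wait, so each pass pins one fence with kref_get(), drops the lock, waits, and then restarts from the list, since other threads may have altered it meanwhile. A distilled sketch of that pin-drop-wait-restart loop; the wait helper is assumed to remove the object from the list on completion, as fence signaling does in the driver:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct obj {
	struct kref kref;
	struct list_head head;
};

void obj_release(struct kref *kref);	/* hypothetical destructor */
int obj_wait(struct obj *obj);		/* hypothetical; may sleep, dequeues @obj */

static void drain_all(struct list_head *list, spinlock_t *lock)
{
	spin_lock_irq(lock);
	while (!list_empty(list)) {
		struct obj *obj = list_first_entry(list, struct obj, head);

		kref_get(&obj->kref);	/* pin across the unlocked wait */
		spin_unlock_irq(lock);

		obj_wait(obj);		/* the list may change meanwhile */

		spin_lock_irq(lock);
		kref_put(&obj->kref, obj_release);
		/* Restart from the head; any previous cursor is stale. */
	}
	spin_unlock_irq(lock);
}
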
670 | |||
671 | void vmw_fence_fifo_up(struct vmw_fence_manager *fman) | ||
136 | { | 672 | { |
137 | struct timespec lag, cond; | 673 | unsigned long irq_flags; |
138 | 674 | ||
139 | cond = ns_to_timespec((s64) us * 1000); | 675 | spin_lock_irqsave(&fman->lock, irq_flags); |
140 | lag = vmw_fifo_lag(queue); | 676 | fman->fifo_down = false; |
141 | return (timespec_compare(&lag, &cond) < 1); | 677 | spin_unlock_irqrestore(&fman->lock, irq_flags); |
142 | } | 678 | } |
143 | 679 | ||
144 | int vmw_wait_lag(struct vmw_private *dev_priv, | 680 | |
145 | struct vmw_fence_queue *queue, uint32_t us) | 681 | int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, |
682 | struct drm_file *file_priv) | ||
146 | { | 683 | { |
147 | struct vmw_fence *fence; | 684 | struct drm_vmw_fence_wait_arg *arg = |
148 | uint32_t sequence; | 685 | (struct drm_vmw_fence_wait_arg *)data; |
686 | unsigned long timeout; | ||
687 | struct ttm_base_object *base; | ||
688 | struct vmw_fence_obj *fence; | ||
689 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
149 | int ret; | 690 | int ret; |
691 | uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ); | ||
692 | |||
693 | /* | ||
694 | * 64-bit division is not natively available on 32-bit systems, so | ||
695 | * approximate the division by 1000000 with shifts. | ||
696 | */ | ||
697 | |||
698 | wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) - | ||
699 | (wait_timeout >> 26); | ||
700 | |||
701 | if (!arg->cookie_valid) { | ||
702 | arg->cookie_valid = 1; | ||
703 | arg->kernel_cookie = jiffies + wait_timeout; | ||
704 | } | ||
705 | |||
706 | base = ttm_base_object_lookup(tfile, arg->handle); | ||
707 | if (unlikely(base == NULL)) { | ||
708 | printk(KERN_ERR "Wait invalid fence object handle " | ||
709 | "0x%08lx.\n", | ||
710 | (unsigned long)arg->handle); | ||
711 | return -EINVAL; | ||
712 | } | ||
713 | |||
714 | fence = &(container_of(base, struct vmw_user_fence, base)->fence); | ||
715 | |||
716 | timeout = jiffies; | ||
717 | if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { | ||
718 | ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ? | ||
719 | 0 : -EBUSY); | ||
720 | goto out; | ||
721 | } | ||
722 | |||
723 | timeout = (unsigned long)arg->kernel_cookie - timeout; | ||
724 | |||
725 | ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout); | ||
726 | |||
727 | out: | ||
728 | ttm_base_object_unref(&base); | ||
729 | |||
730 | /* | ||
731 | * Optionally unref the fence object. | ||
732 | */ | ||
733 | |||
734 | if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) | ||
735 | return ttm_ref_object_base_unref(tfile, arg->handle, | ||
736 | TTM_REF_USAGE); | ||
737 | return ret; | ||
738 | } | ||
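
The shift expression in the ioctl above approximates the divide by 10^6 as 2^-20 + 2^-24 - 2^-26 = 67/2^26, which is about 0.99838e-6, so the computed jiffies value comes out roughly 0.2% short. That is fine for a wait timeout and avoids 64-bit division on 32-bit kernels. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Approximate x / 1000000 using shifts: 2^-20 + 2^-24 - 2^-26. */
static uint64_t approx_div_1e6(uint64_t x)
{
	return (x >> 20) + (x >> 24) - (x >> 26);
}

int main(void)
{
	/* One second in microseconds, scaled by an assumed HZ of 1000. */
	uint64_t x = 1000000ULL * 1000;

	/* Prints 998; the exact quotient is 1000 (about 0.2% short). */
	printf("%llu\n", (unsigned long long)approx_div_1e6(x));
	return 0;
}
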
739 | |||
740 | int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, | ||
741 | struct drm_file *file_priv) | ||
742 | { | ||
743 | struct drm_vmw_fence_signaled_arg *arg = | ||
744 | (struct drm_vmw_fence_signaled_arg *) data; | ||
745 | struct ttm_base_object *base; | ||
746 | struct vmw_fence_obj *fence; | ||
747 | struct vmw_fence_manager *fman; | ||
748 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
749 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
750 | |||
751 | base = ttm_base_object_lookup(tfile, arg->handle); | ||
752 | if (unlikely(base == NULL)) { | ||
753 | printk(KERN_ERR "Fence signaled invalid fence object handle " | ||
754 | "0x%08lx.\n", | ||
755 | (unsigned long)arg->handle); | ||
756 | return -EINVAL; | ||
757 | } | ||
758 | |||
759 | fence = &(container_of(base, struct vmw_user_fence, base)->fence); | ||
760 | fman = fence->fman; | ||
761 | |||
762 | arg->signaled = vmw_fence_obj_signaled(fence, arg->flags); | ||
763 | spin_lock_irq(&fman->lock); | ||
764 | |||
765 | arg->signaled_flags = fence->signaled; | ||
766 | arg->passed_seqno = dev_priv->last_read_seqno; | ||
767 | spin_unlock_irq(&fman->lock); | ||
768 | |||
769 | ttm_base_object_unref(&base); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | |||
775 | int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, | ||
776 | struct drm_file *file_priv) | ||
777 | { | ||
778 | struct drm_vmw_fence_arg *arg = | ||
779 | (struct drm_vmw_fence_arg *) data; | ||
780 | |||
781 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
782 | arg->handle, | ||
783 | TTM_REF_USAGE); | ||
784 | } | ||
785 | |||
786 | /** | ||
787 | * vmw_event_fence_action_destroy | ||
788 | * | ||
789 | * @kref: The struct kref embedded in a struct vmw_event_fence_action. | ||
790 | * | ||
791 | * The vmw_event_fence_action destructor that may be called either after | ||
792 | * the fence action cleanup, or when the event is delivered. | ||
793 | * It frees both the vmw_event_fence_action struct and the actual | ||
794 | * event structure copied to user-space. | ||
795 | */ | ||
796 | static void vmw_event_fence_action_destroy(struct kref *kref) | ||
797 | { | ||
798 | struct vmw_event_fence_action *eaction = | ||
799 | container_of(kref, struct vmw_event_fence_action, kref); | ||
800 | struct ttm_mem_global *mem_glob = | ||
801 | vmw_mem_glob(vmw_priv(eaction->dev)); | ||
802 | uint32_t size = eaction->size; | ||
803 | |||
804 | kfree(eaction->e.event); | ||
805 | kfree(eaction); | ||
806 | ttm_mem_global_free(mem_glob, size); | ||
807 | } | ||
808 | |||
809 | |||
810 | /** | ||
811 | * vmw_event_fence_action_delivered | ||
812 | * | ||
813 | * @e: The struct drm_pending_event embedded in a struct | ||
814 | * vmw_event_fence_action. | ||
815 | * | ||
816 | * The struct drm_pending_event destructor that is called by drm | ||
817 | * once the event is delivered. Since we don't know whether this function | ||
818 | * will be called before or after the fence action destructor, we | ||
819 | * drop a refcount and destroy the object when it reaches zero. | ||
820 | */ | ||
821 | static void vmw_event_fence_action_delivered(struct drm_pending_event *e) | ||
822 | { | ||
823 | struct vmw_event_fence_action *eaction = | ||
824 | container_of(e, struct vmw_event_fence_action, e); | ||
825 | |||
826 | kref_put(&eaction->kref, vmw_event_fence_action_destroy); | ||
827 | } | ||
828 | |||
829 | |||
830 | /** | ||
831 | * vmw_event_fence_action_seq_passed | ||
832 | * | ||
833 | * @action: The struct vmw_fence_action embedded in a struct | ||
834 | * vmw_event_fence_action. | ||
835 | * | ||
836 | * This function is called when the seqno of the fence where @action is | ||
837 | * attached has passed. It queues the event on the submitter's event list. | ||
838 | * This function is always called from atomic context, and may be called | ||
839 | * from irq context. It takes an extra refcount, reflecting that two | ||
840 | * destructors now reference the object. | ||
841 | */ | ||
842 | static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) | ||
843 | { | ||
844 | struct vmw_event_fence_action *eaction = | ||
845 | container_of(action, struct vmw_event_fence_action, action); | ||
846 | struct drm_device *dev = eaction->dev; | ||
847 | struct drm_file *file_priv = eaction->e.file_priv; | ||
848 | unsigned long irq_flags; | ||
849 | |||
850 | kref_get(&eaction->kref); | ||
851 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
852 | |||
853 | if (likely(eaction->tv_sec != NULL)) { | ||
854 | struct timeval tv; | ||
855 | |||
856 | do_gettimeofday(&tv); | ||
857 | *eaction->tv_sec = tv.tv_sec; | ||
858 | *eaction->tv_usec = tv.tv_usec; | ||
859 | } | ||
860 | |||
861 | list_add_tail(&eaction->e.link, &file_priv->event_list); | ||
862 | wake_up_all(&file_priv->event_wait); | ||
863 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
864 | } | ||
865 | |||
866 | /** | ||
867 | * vmw_event_fence_action_cleanup | ||
868 | * | ||
869 | * @action: The struct vmw_fence_action embedded in a struct | ||
870 | * vmw_event_fence_action. | ||
871 | * | ||
872 | * This function is the struct vmw_fence_action destructor. It's typically | ||
873 | * called from a workqueue. | ||
874 | */ | ||
875 | static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) | ||
876 | { | ||
877 | struct vmw_event_fence_action *eaction = | ||
878 | container_of(action, struct vmw_event_fence_action, action); | ||
879 | |||
880 | vmw_fence_obj_unreference(&eaction->fence); | ||
881 | kref_put(&eaction->kref, vmw_event_fence_action_destroy); | ||
882 | } | ||
883 | |||
884 | |||
885 | /** | ||
886 | * vmw_fence_obj_add_action - Add an action to a fence object. | ||
887 | * | ||
888 | * @fence: The fence object. | ||
889 | * @action: The action to add. | ||
890 | * | ||
891 | * Note that the action callbacks may be executed before this function | ||
892 | * returns. | ||
893 | */ | ||
894 | void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | ||
895 | struct vmw_fence_action *action) | ||
896 | { | ||
897 | struct vmw_fence_manager *fman = fence->fman; | ||
898 | unsigned long irq_flags; | ||
899 | bool run_update = false; | ||
900 | |||
901 | mutex_lock(&fman->goal_irq_mutex); | ||
902 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
903 | |||
904 | fman->pending_actions[action->type]++; | ||
905 | if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) { | ||
906 | struct list_head action_list; | ||
150 | 907 | ||
151 | while (!vmw_lag_lt(queue, us)) { | 908 | INIT_LIST_HEAD(&action_list); |
152 | spin_lock(&queue->lock); | 909 | list_add_tail(&action->head, &action_list); |
153 | if (list_empty(&queue->head)) | 910 | vmw_fences_perform_actions(fman, &action_list); |
154 | sequence = atomic_read(&dev_priv->fence_seq); | 911 | } else { |
155 | else { | 912 | list_add_tail(&action->head, &fence->seq_passed_actions); |
156 | fence = list_first_entry(&queue->head, | 913 | |
157 | struct vmw_fence, head); | 914 | /* |
158 | sequence = fence->sequence; | 915 | * This function may set fman::seqno_valid, so it must |
916 | * be run with the goal_irq_mutex held. | ||
917 | */ | ||
918 | run_update = vmw_fence_goal_check_locked(fence); | ||
919 | } | ||
920 | |||
921 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
922 | |||
923 | if (run_update) { | ||
924 | if (!fman->goal_irq_on) { | ||
925 | fman->goal_irq_on = true; | ||
926 | vmw_goal_waiter_add(fman->dev_priv); | ||
159 | } | 927 | } |
160 | spin_unlock(&queue->lock); | 928 | vmw_fences_update(fman); |
929 | } | ||
930 | mutex_unlock(&fman->goal_irq_mutex); | ||
161 | 931 | ||
162 | ret = vmw_wait_fence(dev_priv, false, sequence, true, | 932 | } |
163 | 3*HZ); | ||
164 | 933 | ||
165 | if (unlikely(ret != 0)) | 934 | /** |
166 | return ret; | 935 | * vmw_event_fence_action_create - Post an event for sending when a fence |
936 | * object seqno has passed. | ||
937 | * | ||
938 | * @file_priv: The file connection on which the event should be posted. | ||
939 | * @fence: The fence object on which to post the event. | ||
940 | * @event: Event to be posted. This event should have been allocated | ||
941 | * using k[mz]alloc, and should have been completely initialized. | ||
942 | * @interruptible: Interruptible waits if possible. | ||
943 | * | ||
944 | * As a side effect, the object pointed to by @event may have been | ||
945 | * freed when this function returns. If this function returns with | ||
946 | * an error code, the caller needs to free that object. | ||
947 | */ | ||
948 | |||
949 | int vmw_event_fence_action_create(struct drm_file *file_priv, | ||
950 | struct vmw_fence_obj *fence, | ||
951 | struct drm_event *event, | ||
952 | uint32_t *tv_sec, | ||
953 | uint32_t *tv_usec, | ||
954 | bool interruptible) | ||
955 | { | ||
956 | struct vmw_event_fence_action *eaction; | ||
957 | struct ttm_mem_global *mem_glob = | ||
958 | vmw_mem_glob(fence->fman->dev_priv); | ||
959 | struct vmw_fence_manager *fman = fence->fman; | ||
960 | uint32_t size = fman->event_fence_action_size + | ||
961 | ttm_round_pot(event->length); | ||
962 | int ret; | ||
963 | |||
964 | /* | ||
965 | * Account for internal structure size as well as the | ||
966 | * event size itself. | ||
967 | */ | ||
968 | |||
969 | ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible); | ||
970 | if (unlikely(ret != 0)) | ||
971 | return ret; | ||
167 | 972 | ||
168 | (void) vmw_fence_pull(queue, sequence); | 973 | eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); |
974 | if (unlikely(eaction == NULL)) { | ||
975 | ttm_mem_global_free(mem_glob, size); | ||
976 | return -ENOMEM; | ||
169 | } | 977 | } |
978 | |||
979 | eaction->e.event = event; | ||
980 | eaction->e.file_priv = file_priv; | ||
981 | eaction->e.destroy = vmw_event_fence_action_delivered; | ||
982 | |||
983 | eaction->action.seq_passed = vmw_event_fence_action_seq_passed; | ||
984 | eaction->action.cleanup = vmw_event_fence_action_cleanup; | ||
985 | eaction->action.type = VMW_ACTION_EVENT; | ||
986 | |||
987 | eaction->fence = vmw_fence_obj_reference(fence); | ||
988 | eaction->dev = fman->dev_priv->dev; | ||
989 | eaction->size = size; | ||
990 | eaction->tv_sec = tv_sec; | ||
991 | eaction->tv_usec = tv_usec; | ||
992 | |||
993 | kref_init(&eaction->kref); | ||
994 | vmw_fence_obj_add_action(fence, &eaction->action); | ||
995 | |||
170 | return 0; | 996 | return 0; |
171 | } | 997 | } |
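
On success, ownership of the event passes to the action and delivery (or cleanup) will free it; the caller frees it only on error, exactly as vmw_fence_event_ioctl() below does. A hedged, driver-context caller sketch; the helper name is invented and the driver's headers are assumed:

#include <linux/slab.h>

static int post_signal_event(struct drm_file *file_priv,
			     struct vmw_fence_obj *fence)
{
	struct drm_vmw_event_fence *event;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL))
		return -ENOMEM;

	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->base.length = sizeof(*event);

	ret = vmw_event_fence_action_create(file_priv, fence, &event->base,
					    NULL, NULL, true);
	if (unlikely(ret != 0)) {
		kfree(event);	/* on error the event is still ours to free */
		return ret;
	}

	/* On success the action owns the event; do not touch it again. */
	return 0;
}
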
172 | 998 | ||
999 | int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | ||
1000 | struct drm_file *file_priv) | ||
1001 | { | ||
1002 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1003 | struct drm_vmw_fence_event_arg *arg = | ||
1004 | (struct drm_vmw_fence_event_arg *) data; | ||
1005 | struct vmw_fence_obj *fence = NULL; | ||
1006 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
1007 | struct drm_vmw_fence_rep __user *user_fence_rep = | ||
1008 | (struct drm_vmw_fence_rep __user *)(unsigned long) | ||
1009 | arg->fence_rep; | ||
1010 | uint32_t handle; | ||
1011 | unsigned long irq_flags; | ||
1012 | struct drm_vmw_event_fence *event; | ||
1013 | int ret; | ||
1014 | |||
1015 | /* | ||
1016 | * Look up an existing fence object, | ||
1017 | * and if user-space wants a new reference, | ||
1018 | * add one. | ||
1019 | */ | ||
1020 | if (arg->handle) { | ||
1021 | struct ttm_base_object *base = | ||
1022 | ttm_base_object_lookup(vmw_fp->tfile, arg->handle); | ||
173 | 1023 | ||
1024 | if (unlikely(base == NULL)) { | ||
1025 | DRM_ERROR("Fence event invalid fence object handle " | ||
1026 | "0x%08lx.\n", | ||
1027 | (unsigned long)arg->handle); | ||
1028 | return -EINVAL; | ||
1029 | } | ||
1030 | fence = &(container_of(base, struct vmw_user_fence, | ||
1031 | base)->fence); | ||
1032 | (void) vmw_fence_obj_reference(fence); | ||
1033 | |||
1034 | if (user_fence_rep != NULL) { | ||
1035 | bool existed; | ||
1036 | |||
1037 | ret = ttm_ref_object_add(vmw_fp->tfile, base, | ||
1038 | TTM_REF_USAGE, &existed); | ||
1039 | if (unlikely(ret != 0)) { | ||
1040 | DRM_ERROR("Failed to reference a fence " | ||
1041 | "object.\n"); | ||
1042 | goto out_no_ref_obj; | ||
1043 | } | ||
1044 | handle = base->hash.key; | ||
1045 | } | ||
1046 | ttm_base_object_unref(&base); | ||
1047 | } | ||
1048 | |||
1049 | /* | ||
1050 | * Create a new fence object. | ||
1051 | */ | ||
1052 | if (!fence) { | ||
1053 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, | ||
1054 | &fence, | ||
1055 | (user_fence_rep) ? | ||
1056 | &handle : NULL); | ||
1057 | if (unlikely(ret != 0)) { | ||
1058 | DRM_ERROR("Fence event failed to create fence.\n"); | ||
1059 | return ret; | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | BUG_ON(fence == NULL); | ||
1064 | |||
1065 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
1066 | |||
1067 | ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0; | ||
1068 | if (likely(ret == 0)) | ||
1069 | file_priv->event_space -= sizeof(*event); | ||
1070 | |||
1071 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
1072 | |||
1073 | if (unlikely(ret != 0)) { | ||
1074 | DRM_ERROR("Failed to allocate event space for this file.\n"); | ||
1075 | goto out_no_event_space; | ||
1076 | } | ||
1077 | |||
1078 | event = kzalloc(sizeof(*event), GFP_KERNEL); | ||
1079 | if (unlikely(event == NULL)) { | ||
1080 | DRM_ERROR("Failed to allocate an event.\n"); | ||
1081 | goto out_no_event; | ||
1082 | } | ||
1083 | |||
1084 | event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED; | ||
1085 | event->base.length = sizeof(*event); | ||
1086 | event->user_data = arg->user_data; | ||
1087 | |||
1088 | if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) | ||
1089 | ret = vmw_event_fence_action_create(file_priv, fence, | ||
1090 | &event->base, | ||
1091 | &event->tv_sec, | ||
1092 | &event->tv_usec, | ||
1093 | true); | ||
1094 | else | ||
1095 | ret = vmw_event_fence_action_create(file_priv, fence, | ||
1096 | &event->base, | ||
1097 | NULL, | ||
1098 | NULL, | ||
1099 | true); | ||
1100 | |||
1101 | if (unlikely(ret != 0)) { | ||
1102 | if (ret != -ERESTARTSYS) | ||
1103 | DRM_ERROR("Failed to attach event to fence.\n"); | ||
1104 | goto out_no_attach; | ||
1105 | } | ||
1106 | |||
1107 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, | ||
1108 | handle); | ||
1109 | vmw_fence_obj_unreference(&fence); | ||
1110 | return 0; | ||
1111 | out_no_attach: | ||
1112 | kfree(event); | ||
1113 | out_no_event: | ||
1114 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
1115 | file_priv->event_space += sizeof(*event); | ||
1116 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
1117 | out_no_event_space: | ||
1118 | if (user_fence_rep != NULL) | ||
1119 | ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
1120 | handle, TTM_REF_USAGE); | ||
1121 | out_no_ref_obj: | ||
1122 | vmw_fence_obj_unreference(&fence); | ||
1123 | return ret; | ||
1124 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h new file mode 100644 index 000000000000..0854a2096b55 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #ifndef _VMWGFX_FENCE_H_ | ||
29 | #define _VMWGFX_FENCE_H_ | ||
30 | #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) | ||
31 | |||
32 | struct vmw_private; | ||
33 | |||
34 | struct vmw_fence_manager; | ||
35 | |||
36 | /** | ||
37 | * enum vmw_action_type - Type tags for actions that can be attached | ||
38 | * to fence objects. | ||
39 | */ | ||
40 | enum vmw_action_type { | ||
41 | VMW_ACTION_EVENT = 0, | ||
42 | VMW_ACTION_MAX | ||
43 | }; | ||
44 | |||
45 | struct vmw_fence_action { | ||
46 | struct list_head head; | ||
47 | enum vmw_action_type type; | ||
48 | void (*seq_passed) (struct vmw_fence_action *action); | ||
49 | void (*cleanup) (struct vmw_fence_action *action); | ||
50 | }; | ||
51 | |||
52 | struct vmw_fence_obj { | ||
53 | struct kref kref; | ||
54 | u32 seqno; | ||
55 | |||
56 | struct vmw_fence_manager *fman; | ||
57 | struct list_head head; | ||
58 | uint32_t signaled; | ||
59 | uint32_t signal_mask; | ||
60 | struct list_head seq_passed_actions; | ||
61 | void (*destroy)(struct vmw_fence_obj *fence); | ||
62 | wait_queue_head_t queue; | ||
63 | }; | ||
64 | |||
65 | extern struct vmw_fence_manager * | ||
66 | vmw_fence_manager_init(struct vmw_private *dev_priv); | ||
67 | |||
68 | extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman); | ||
69 | |||
70 | extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p); | ||
71 | |||
72 | extern struct vmw_fence_obj * | ||
73 | vmw_fence_obj_reference(struct vmw_fence_obj *fence); | ||
74 | |||
75 | extern void vmw_fences_update(struct vmw_fence_manager *fman); | ||
76 | |||
77 | extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, | ||
78 | uint32_t flags); | ||
79 | |||
80 | extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags, | ||
81 | bool lazy, | ||
82 | bool interruptible, unsigned long timeout); | ||
83 | |||
84 | extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence); | ||
85 | |||
86 | extern int vmw_fence_create(struct vmw_fence_manager *fman, | ||
87 | uint32_t seqno, | ||
88 | uint32_t mask, | ||
89 | struct vmw_fence_obj **p_fence); | ||
90 | |||
91 | extern int vmw_user_fence_create(struct drm_file *file_priv, | ||
92 | struct vmw_fence_manager *fman, | ||
93 | uint32_t sequence, | ||
94 | uint32_t mask, | ||
95 | struct vmw_fence_obj **p_fence, | ||
96 | uint32_t *p_handle); | ||
97 | |||
98 | extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman); | ||
99 | |||
100 | extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman); | ||
101 | |||
102 | extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, | ||
103 | struct drm_file *file_priv); | ||
104 | |||
105 | extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, | ||
106 | struct drm_file *file_priv); | ||
107 | |||
108 | extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, | ||
109 | struct drm_file *file_priv); | ||
110 | extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | ||
111 | struct drm_file *file_priv); | ||
112 | |||
113 | #endif /* _VMWGFX_FENCE_H_ */ | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 635c0ffee7fe..03bbc2a6f9a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -45,7 +45,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
45 | if (hwversion == 0) | 45 | if (hwversion == 0) |
46 | return false; | 46 | return false; |
47 | 47 | ||
48 | if (hwversion < SVGA3D_HWVERSION_WS65_B1) | 48 | if (hwversion < SVGA3D_HWVERSION_WS8_B1) |
49 | return false; | ||
50 | |||
51 | /* Non-Screen Object path does not support surfaces */ | ||
52 | if (!dev_priv->sou_priv) | ||
49 | return false; | 53 | return false; |
50 | 54 | ||
51 | return true; | 55 | return true; |
@@ -72,22 +76,12 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
72 | uint32_t max; | 76 | uint32_t max; |
73 | uint32_t min; | 77 | uint32_t min; |
74 | uint32_t dummy; | 78 | uint32_t dummy; |
75 | int ret; | ||
76 | 79 | ||
77 | fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | 80 | fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; |
78 | fifo->static_buffer = vmalloc(fifo->static_buffer_size); | 81 | fifo->static_buffer = vmalloc(fifo->static_buffer_size); |
79 | if (unlikely(fifo->static_buffer == NULL)) | 82 | if (unlikely(fifo->static_buffer == NULL)) |
80 | return -ENOMEM; | 83 | return -ENOMEM; |
81 | 84 | ||
82 | fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | ||
83 | fifo->last_data_size = 0; | ||
84 | fifo->last_buffer_add = false; | ||
85 | fifo->last_buffer = vmalloc(fifo->last_buffer_size); | ||
86 | if (unlikely(fifo->last_buffer == NULL)) { | ||
87 | ret = -ENOMEM; | ||
88 | goto out_err; | ||
89 | } | ||
90 | |||
91 | fifo->dynamic_buffer = NULL; | 85 | fifo->dynamic_buffer = NULL; |
92 | fifo->reserved_size = 0; | 86 | fifo->reserved_size = 0; |
93 | fifo->using_bounce_buffer = false; | 87 | fifo->using_bounce_buffer = false; |
@@ -137,14 +131,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
137 | (unsigned int) min, | 131 | (unsigned int) min, |
138 | (unsigned int) fifo->capabilities); | 132 | (unsigned int) fifo->capabilities); |
139 | 133 | ||
140 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); | 134 | atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); |
141 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 135 | iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); |
142 | vmw_fence_queue_init(&fifo->fence_queue); | 136 | vmw_marker_queue_init(&fifo->marker_queue); |
143 | return vmw_fifo_send_fence(dev_priv, &dummy); | 137 | return vmw_fifo_send_fence(dev_priv, &dummy); |
144 | out_err: | ||
145 | vfree(fifo->static_buffer); | ||
146 | fifo->static_buffer = NULL; | ||
147 | return ret; | ||
148 | } | 138 | } |
149 | 139 | ||
150 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) | 140 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) |
@@ -170,7 +160,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
170 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) | 160 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) |
171 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | 161 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); |
172 | 162 | ||
173 | dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | 163 | dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); |
174 | 164 | ||
175 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, | 165 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, |
176 | dev_priv->config_done_state); | 166 | dev_priv->config_done_state); |
@@ -180,12 +170,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
180 | dev_priv->traces_state); | 170 | dev_priv->traces_state); |
181 | 171 | ||
182 | mutex_unlock(&dev_priv->hw_mutex); | 172 | mutex_unlock(&dev_priv->hw_mutex); |
183 | vmw_fence_queue_takedown(&fifo->fence_queue); | 173 | vmw_marker_queue_takedown(&fifo->marker_queue); |
184 | |||
185 | if (likely(fifo->last_buffer != NULL)) { | ||
186 | vfree(fifo->last_buffer); | ||
187 | fifo->last_buffer = NULL; | ||
188 | } | ||
189 | 174 | ||
190 | if (likely(fifo->static_buffer != NULL)) { | 175 | if (likely(fifo->static_buffer != NULL)) { |
191 | vfree(fifo->static_buffer); | 176 | vfree(fifo->static_buffer); |
@@ -262,9 +247,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
262 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 247 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
263 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, | 248 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, |
264 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 249 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
265 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | 250 | dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS; |
266 | vmw_read(dev_priv, SVGA_REG_IRQMASK) | | 251 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
267 | SVGA_IRQFLAG_FIFO_PROGRESS); | ||
268 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 252 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
269 | } | 253 | } |
270 | mutex_unlock(&dev_priv->hw_mutex); | 254 | mutex_unlock(&dev_priv->hw_mutex); |
@@ -286,9 +270,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
286 | mutex_lock(&dev_priv->hw_mutex); | 270 | mutex_lock(&dev_priv->hw_mutex); |
287 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { | 271 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { |
288 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 272 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
289 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | 273 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; |
290 | vmw_read(dev_priv, SVGA_REG_IRQMASK) & | 274 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
291 | ~SVGA_IRQFLAG_FIFO_PROGRESS); | ||
292 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 275 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
293 | } | 276 | } |
294 | mutex_unlock(&dev_priv->hw_mutex); | 277 | mutex_unlock(&dev_priv->hw_mutex); |
@@ -296,6 +279,16 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
296 | return ret; | 279 | return ret; |
297 | } | 280 | } |
298 | 281 | ||
282 | /** | ||
283 | * Reserve @bytes number of bytes in the fifo. | ||
284 | * | ||
285 | * This function will return NULL (error) on two conditions: | ||
286 | * if it times out waiting for fifo space, or if @bytes is larger than | ||
287 | * the available fifo space. | ||
288 | * | ||
289 | * Returns: | ||
290 | * Pointer to the fifo, or NULL on error (possible hardware hang). | ||
291 | */ | ||
299 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | 292 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) |
300 | { | 293 | { |
301 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | 294 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; |
@@ -466,7 +459,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
466 | mutex_unlock(&fifo_state->fifo_mutex); | 459 | mutex_unlock(&fifo_state->fifo_mutex); |
467 | } | 460 | } |
468 | 461 | ||
469 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | 462 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) |
470 | { | 463 | { |
471 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | 464 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; |
472 | struct svga_fifo_cmd_fence *cmd_fence; | 465 | struct svga_fifo_cmd_fence *cmd_fence; |
@@ -476,16 +469,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
476 | 469 | ||
477 | fm = vmw_fifo_reserve(dev_priv, bytes); | 470 | fm = vmw_fifo_reserve(dev_priv, bytes); |
478 | if (unlikely(fm == NULL)) { | 471 | if (unlikely(fm == NULL)) { |
479 | *sequence = atomic_read(&dev_priv->fence_seq); | 472 | *seqno = atomic_read(&dev_priv->marker_seq); |
480 | ret = -ENOMEM; | 473 | ret = -ENOMEM; |
481 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, | 474 | (void)vmw_fallback_wait(dev_priv, false, true, *seqno, |
482 | false, 3*HZ); | 475 | false, 3*HZ); |
483 | goto out_err; | 476 | goto out_err; |
484 | } | 477 | } |
485 | 478 | ||
486 | do { | 479 | do { |
487 | *sequence = atomic_add_return(1, &dev_priv->fence_seq); | 480 | *seqno = atomic_add_return(1, &dev_priv->marker_seq); |
488 | } while (*sequence == 0); | 481 | } while (*seqno == 0); |
489 | 482 | ||
490 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { | 483 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { |
491 | 484 | ||
@@ -502,61 +495,68 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
502 | cmd_fence = (struct svga_fifo_cmd_fence *) | 495 | cmd_fence = (struct svga_fifo_cmd_fence *) |
503 | ((unsigned long)fm + sizeof(__le32)); | 496 | ((unsigned long)fm + sizeof(__le32)); |
504 | 497 | ||
505 | iowrite32(*sequence, &cmd_fence->fence); | 498 | iowrite32(*seqno, &cmd_fence->fence); |
506 | fifo_state->last_buffer_add = true; | ||
507 | vmw_fifo_commit(dev_priv, bytes); | 499 | vmw_fifo_commit(dev_priv, bytes); |
508 | fifo_state->last_buffer_add = false; | 500 | (void) vmw_marker_push(&fifo_state->marker_queue, *seqno); |
509 | (void) vmw_fence_push(&fifo_state->fence_queue, *sequence); | 501 | vmw_update_seqno(dev_priv, fifo_state); |
510 | vmw_update_sequence(dev_priv, fifo_state); | ||
511 | 502 | ||
512 | out_err: | 503 | out_err: |
513 | return ret; | 504 | return ret; |
514 | } | 505 | } |
515 | 506 | ||
516 | /** | 507 | /** |
517 | * Map the first page of the FIFO read-only to user-space. | 508 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. |
509 | * | ||
510 | * @dev_priv: The device private structure. | ||
511 | * @cid: The hardware context id used for the query. | ||
512 | * | ||
513 | * This function is used to emit a dummy occlusion query with | ||
514 | * no primitives rendered between query begin and query end. | ||
515 | * It's used to provide a query barrier, in order to know that when | ||
516 | * this query is finished, all preceding queries are also finished. | ||
517 | * | ||
518 | * A query result structure should have been initialized at the start | ||
519 | * of the dev_priv->dummy_query_bo buffer object, and that buffer object | ||
520 | * must be either reserved or pinned when this function is called. | ||
521 | * | ||
522 | * Returns -ENOMEM on failure to reserve fifo space. | ||
518 | */ | 523 | */ |
519 | 524 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | |
520 | static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 525 | uint32_t cid) |
521 | { | 526 | { |
522 | int ret; | 527 | /* |
523 | unsigned long address = (unsigned long)vmf->virtual_address; | 528 | * A query wait without a preceding query end will |
529 | * actually finish all queries for this cid | ||
530 | * without writing to the query result structure. | ||
531 | */ | ||
524 | 532 | ||
525 | if (address != vma->vm_start) | 533 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; |
526 | return VM_FAULT_SIGBUS; | 534 | struct { |
535 | SVGA3dCmdHeader header; | ||
536 | SVGA3dCmdWaitForQuery body; | ||
537 | } *cmd; | ||
527 | 538 | ||
528 | ret = vm_insert_pfn(vma, address, vma->vm_pgoff); | 539 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
529 | if (likely(ret == -EBUSY || ret == 0)) | ||
530 | return VM_FAULT_NOPAGE; | ||
531 | else if (ret == -ENOMEM) | ||
532 | return VM_FAULT_OOM; | ||
533 | 540 | ||
534 | return VM_FAULT_SIGBUS; | 541 | if (unlikely(cmd == NULL)) { |
535 | } | 542 | DRM_ERROR("Out of fifo space for dummy query.\n"); |
543 | return -ENOMEM; | ||
544 | } | ||
536 | 545 | ||
537 | static struct vm_operations_struct vmw_fifo_vm_ops = { | 546 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY; |
538 | .fault = vmw_fifo_vm_fault, | 547 | cmd->header.size = sizeof(cmd->body); |
539 | .open = NULL, | 548 | cmd->body.cid = cid; |
540 | .close = NULL | 549 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; |
541 | }; | 550 | |
551 | if (bo->mem.mem_type == TTM_PL_VRAM) { | ||
552 | cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER; | ||
553 | cmd->body.guestResult.offset = bo->offset; | ||
554 | } else { | ||
555 | cmd->body.guestResult.gmrId = bo->mem.start; | ||
556 | cmd->body.guestResult.offset = 0; | ||
557 | } | ||
558 | |||
559 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
542 | 560 | ||
543 | int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma) | ||
544 | { | ||
545 | struct drm_file *file_priv; | ||
546 | struct vmw_private *dev_priv; | ||
547 | |||
548 | file_priv = filp->private_data; | ||
549 | dev_priv = vmw_priv(file_priv->minor->dev); | ||
550 | |||
551 | if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) || | ||
552 | (vma->vm_end - vma->vm_start) != PAGE_SIZE) | ||
553 | return -EINVAL; | ||
554 | |||
555 | vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); | ||
556 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED; | ||
557 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | ||
558 | vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED, | ||
559 | vma->vm_page_prot); | ||
560 | vma->vm_ops = &vmw_fifo_vm_ops; | ||
561 | return 0; | 561 | return 0; |
562 | } | 562 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index de0c5948521d..f4e7763a7694 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | 3 | * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -29,6 +29,77 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "ttm/ttm_bo_driver.h" | 30 | #include "ttm/ttm_bo_driver.h" |
31 | 31 | ||
32 | #define VMW_PPN_SIZE sizeof(unsigned long) | ||
33 | |||
34 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, | ||
35 | struct page *pages[], | ||
36 | unsigned long num_pages, | ||
37 | int gmr_id) | ||
38 | { | ||
39 | SVGAFifoCmdDefineGMR2 define_cmd; | ||
40 | SVGAFifoCmdRemapGMR2 remap_cmd; | ||
41 | uint32_t define_size = sizeof(define_cmd) + 4; | ||
42 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4; | ||
43 | uint32_t *cmd; | ||
44 | uint32_t *cmd_orig; | ||
45 | uint32_t i; | ||
46 | |||
47 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); | ||
48 | if (unlikely(cmd == NULL)) | ||
49 | return -ENOMEM; | ||
50 | |||
51 | define_cmd.gmrId = gmr_id; | ||
52 | define_cmd.numPages = num_pages; | ||
53 | |||
54 | remap_cmd.gmrId = gmr_id; | ||
55 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? | ||
56 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; | ||
57 | remap_cmd.offsetPages = 0; | ||
58 | remap_cmd.numPages = num_pages; | ||
59 | |||
60 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | ||
61 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | ||
62 | cmd += sizeof(define_cmd) / sizeof(uint32); | ||
63 | |||
64 | *cmd++ = SVGA_CMD_REMAP_GMR2; | ||
65 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); | ||
66 | cmd += sizeof(remap_cmd) / sizeof(uint32); | ||
67 | |||
68 | for (i = 0; i < num_pages; ++i) { | ||
69 | if (VMW_PPN_SIZE <= 4) | ||
70 | *cmd = page_to_pfn(*pages++); | ||
71 | else | ||
72 | *((uint64_t *)cmd) = page_to_pfn(*pages++); | ||
73 | |||
74 | cmd += VMW_PPN_SIZE / sizeof(*cmd); | ||
75 | } | ||
76 | |||
77 | vmw_fifo_commit(dev_priv, define_size + remap_size); | ||
78 | |||
79 | return 0; | ||
80 | } | ||
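
Each mapped page is encoded as a 32-bit or 64-bit PPN depending on sizeof(unsigned long), and the SVGA_REMAP_GMR2_PPN32/PPN64 flag chosen earlier must match the width actually written, which the conditional above (32-bit stores when VMW_PPN_SIZE <= 4, 64-bit stores otherwise) now guarantees; as written before this fix, the branches were inverted. A standalone sketch of the same packing decision:

#include <stdint.h>
#include <string.h>

#define PPN_SIZE sizeof(unsigned long)	/* mirrors VMW_PPN_SIZE above */

/* Write one page frame number at @cmd in the width the flags promised. */
static uint32_t *pack_ppn(uint32_t *cmd, unsigned long pfn)
{
	if (PPN_SIZE <= 4) {
		*cmd = (uint32_t)pfn;			/* ..._PPN32 layout */
	} else {
		uint64_t ppn = pfn;			/* ..._PPN64 layout */
		memcpy(cmd, &ppn, sizeof(ppn));		/* alignment-safe */
	}
	return cmd + PPN_SIZE / sizeof(*cmd);
}
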
81 | |||
82 | static void vmw_gmr2_unbind(struct vmw_private *dev_priv, | ||
83 | int gmr_id) | ||
84 | { | ||
85 | SVGAFifoCmdDefineGMR2 define_cmd; | ||
86 | uint32_t define_size = sizeof(define_cmd) + 4; | ||
87 | uint32_t *cmd; | ||
88 | |||
89 | cmd = vmw_fifo_reserve(dev_priv, define_size); | ||
90 | if (unlikely(cmd == NULL)) { | ||
91 | DRM_ERROR("GMR2 unbind failed.\n"); | ||
92 | return; | ||
93 | } | ||
94 | define_cmd.gmrId = gmr_id; | ||
95 | define_cmd.numPages = 0; | ||
96 | |||
97 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | ||
98 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | ||
99 | |||
100 | vmw_fifo_commit(dev_priv, define_size); | ||
101 | } | ||
102 | |||
32 | /** | 103 | /** |
33 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize | 104 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize |
34 | * the number of used descriptors. | 105 | * the number of used descriptors. |
@@ -170,6 +241,9 @@ int vmw_gmr_bind(struct vmw_private *dev_priv, | |||
170 | struct list_head desc_pages; | 241 | struct list_head desc_pages; |
171 | int ret; | 242 | int ret; |
172 | 243 | ||
244 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) | ||
245 | return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id); | ||
246 | |||
173 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) | 247 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) |
174 | return -EINVAL; | 248 | return -EINVAL; |
175 | 249 | ||
@@ -192,6 +266,11 @@ int vmw_gmr_bind(struct vmw_private *dev_priv, | |||
192 | 266 | ||
193 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) | 267 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) |
194 | { | 268 | { |
269 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { | ||
270 | vmw_gmr2_unbind(dev_priv, gmr_id); | ||
271 | return; | ||
272 | } | ||
273 | |||
195 | mutex_lock(&dev_priv->hw_mutex); | 274 | mutex_lock(&dev_priv->hw_mutex); |
196 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | 275 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); |
197 | wmb(); | 276 | wmb(); |
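Note how the GMR2 bind path packs two variable-size commands into a single FIFO reservation: a DEFINE_GMR2 header plus body, then a REMAP_GMR2 header plus body, with the PPN list trailing. A simplified sketch of that packing pattern; the body structs and command ids below are illustrative stand-ins, not the real SVGA layouts:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the SVGA command bodies. */
struct define_body { uint32_t gmr_id; uint32_t num_pages; };
struct remap_body  { uint32_t gmr_id; uint32_t flags;
		     uint32_t offset_pages; uint32_t num_pages; };

#define CMD_DEFINE 1u	/* placeholder ids, not the SVGA values */
#define CMD_REMAP  2u

/* Pack [id][define][id][remap] into a 32-bit stream and return the
 * position where the trailing PPN entries would be written. */
static uint32_t *pack_bind(uint32_t *cmd,
			   const struct define_body *def,
			   const struct remap_body *remap)
{
	*cmd++ = CMD_DEFINE;
	memcpy(cmd, def, sizeof(*def));
	cmd += sizeof(*def) / sizeof(*cmd);

	*cmd++ = CMD_REMAP;
	memcpy(cmd, remap, sizeof(*remap));
	cmd += sizeof(*remap) / sizeof(*cmd);

	return cmd;
}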
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index ac6e0d1bd629..5f717152cff5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | |||
@@ -40,6 +40,8 @@ struct vmwgfx_gmrid_man { | |||
40 | spinlock_t lock; | 40 | spinlock_t lock; |
41 | struct ida gmr_ida; | 41 | struct ida gmr_ida; |
42 | uint32_t max_gmr_ids; | 42 | uint32_t max_gmr_ids; |
43 | uint32_t max_gmr_pages; | ||
44 | uint32_t used_gmr_pages; | ||
43 | }; | 45 | }; |
44 | 46 | ||
45 | static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, | 47 | static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, |
@@ -49,33 +51,50 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, | |||
49 | { | 51 | { |
50 | struct vmwgfx_gmrid_man *gman = | 52 | struct vmwgfx_gmrid_man *gman = |
51 | (struct vmwgfx_gmrid_man *)man->priv; | 53 | (struct vmwgfx_gmrid_man *)man->priv; |
52 | int ret; | 54 | int ret = 0; |
53 | int id; | 55 | int id; |
54 | 56 | ||
55 | mem->mm_node = NULL; | 57 | mem->mm_node = NULL; |
56 | 58 | ||
57 | do { | 59 | spin_lock(&gman->lock); |
58 | if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) | 60 | |
59 | return -ENOMEM; | 61 | if (gman->max_gmr_pages > 0) { |
62 | gman->used_gmr_pages += bo->num_pages; | ||
63 | if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) | ||
64 | goto out_err_locked; | ||
65 | } | ||
60 | 66 | ||
67 | do { | ||
68 | spin_unlock(&gman->lock); | ||
69 | if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) { | ||
70 | ret = -ENOMEM; | ||
71 | goto out_err; | ||
72 | } | ||
61 | spin_lock(&gman->lock); | 73 | spin_lock(&gman->lock); |
62 | ret = ida_get_new(&gman->gmr_ida, &id); | ||
63 | 74 | ||
75 | ret = ida_get_new(&gman->gmr_ida, &id); | ||
64 | if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { | 76 | if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { |
65 | ida_remove(&gman->gmr_ida, id); | 77 | ida_remove(&gman->gmr_ida, id); |
66 | spin_unlock(&gman->lock); | 78 | ret = 0; |
67 | return 0; | 79 | goto out_err_locked; |
68 | } | 80 | } |
69 | |||
70 | spin_unlock(&gman->lock); | ||
71 | |||
72 | } while (ret == -EAGAIN); | 81 | } while (ret == -EAGAIN); |
73 | 82 | ||
74 | if (likely(ret == 0)) { | 83 | if (likely(ret == 0)) { |
75 | mem->mm_node = gman; | 84 | mem->mm_node = gman; |
76 | mem->start = id; | 85 | mem->start = id; |
77 | } | 86 | mem->num_pages = bo->num_pages; |
87 | } else | ||
88 | goto out_err_locked; | ||
89 | |||
90 | spin_unlock(&gman->lock); | ||
91 | return 0; | ||
78 | 92 | ||
93 | out_err: | ||
94 | spin_lock(&gman->lock); | ||
95 | out_err_locked: | ||
96 | gman->used_gmr_pages -= bo->num_pages; | ||
97 | spin_unlock(&gman->lock); | ||
79 | return ret; | 98 | return ret; |
80 | } | 99 | } |
81 | 100 | ||
@@ -88,6 +107,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, | |||
88 | if (mem->mm_node) { | 107 | if (mem->mm_node) { |
89 | spin_lock(&gman->lock); | 108 | spin_lock(&gman->lock); |
90 | ida_remove(&gman->gmr_ida, mem->start); | 109 | ida_remove(&gman->gmr_ida, mem->start); |
110 | gman->used_gmr_pages -= mem->num_pages; | ||
91 | spin_unlock(&gman->lock); | 111 | spin_unlock(&gman->lock); |
92 | mem->mm_node = NULL; | 112 | mem->mm_node = NULL; |
93 | } | 113 | } |
@@ -96,6 +116,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, | |||
96 | static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | 116 | static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, |
97 | unsigned long p_size) | 117 | unsigned long p_size) |
98 | { | 118 | { |
119 | struct vmw_private *dev_priv = | ||
120 | container_of(man->bdev, struct vmw_private, bdev); | ||
99 | struct vmwgfx_gmrid_man *gman = | 121 | struct vmwgfx_gmrid_man *gman = |
100 | kzalloc(sizeof(*gman), GFP_KERNEL); | 122 | kzalloc(sizeof(*gman), GFP_KERNEL); |
101 | 123 | ||
@@ -103,6 +125,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | |||
103 | return -ENOMEM; | 125 | return -ENOMEM; |
104 | 126 | ||
105 | spin_lock_init(&gman->lock); | 127 | spin_lock_init(&gman->lock); |
128 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
129 | gman->used_gmr_pages = 0; | ||
106 | ida_init(&gman->gmr_ida); | 130 | ida_init(&gman->gmr_ida); |
107 | gman->max_gmr_ids = p_size; | 131 | gman->max_gmr_ids = p_size; |
108 | man->priv = (void *) gman; | 132 | man->priv = (void *) gman; |
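The gmrid manager now charges a page budget up front under its spinlock and rolls the charge back on every failure path, so used_gmr_pages stays consistent whichever exit is taken. The same reserve/rollback accounting in isolation, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdint.h>
#include <errno.h>

struct page_budget {
	pthread_mutex_t lock;
	uint32_t max_pages;	/* 0 means no limit is enforced */
	uint32_t used_pages;
};

/* Charge num pages against the budget; undo the charge on failure. */
static int budget_reserve(struct page_budget *b, uint32_t num)
{
	int ret = 0;

	pthread_mutex_lock(&b->lock);
	if (b->max_pages > 0) {
		b->used_pages += num;
		if (b->used_pages > b->max_pages) {
			b->used_pages -= num;	/* roll back */
			ret = -ENOSPC;
		}
	}
	pthread_mutex_unlock(&b->lock);
	return ret;
}

static void budget_release(struct page_budget *b, uint32_t num)
{
	pthread_mutex_lock(&b->lock);
	b->used_pages -= num;
	pthread_mutex_unlock(&b->lock);
}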
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 570d57775a58..3f6343502d1f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_drv.h" | 28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_drm.h" | 29 | #include "vmwgfx_drm.h" |
30 | #include "vmwgfx_kms.h" | ||
30 | 31 | ||
31 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, | 32 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, |
32 | struct drm_file *file_priv) | 33 | struct drm_file *file_priv) |
@@ -45,9 +46,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
45 | case DRM_VMW_PARAM_3D: | 46 | case DRM_VMW_PARAM_3D: |
46 | param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0; | 47 | param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0; |
47 | break; | 48 | break; |
48 | case DRM_VMW_PARAM_FIFO_OFFSET: | ||
49 | param->value = dev_priv->mmio_start; | ||
50 | break; | ||
51 | case DRM_VMW_PARAM_HW_CAPS: | 49 | case DRM_VMW_PARAM_HW_CAPS: |
52 | param->value = dev_priv->capabilities; | 50 | param->value = dev_priv->capabilities; |
53 | break; | 51 | break; |
@@ -57,6 +55,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
57 | case DRM_VMW_PARAM_MAX_FB_SIZE: | 55 | case DRM_VMW_PARAM_MAX_FB_SIZE: |
58 | param->value = dev_priv->vram_size; | 56 | param->value = dev_priv->vram_size; |
59 | break; | 57 | break; |
58 | case DRM_VMW_PARAM_FIFO_HW_VERSION: | ||
59 | { | ||
60 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
61 | |||
62 | param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION); | ||
63 | break; | ||
64 | } | ||
60 | default: | 65 | default: |
61 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 66 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
62 | param->param); | 67 | param->param); |
@@ -66,25 +71,259 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
66 | return 0; | 71 | return 0; |
67 | } | 72 | } |
68 | 73 | ||
69 | int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data, | 74 | |
75 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | ||
70 | struct drm_file *file_priv) | 76 | struct drm_file *file_priv) |
71 | { | 77 | { |
78 | struct drm_vmw_get_3d_cap_arg *arg = | ||
79 | (struct drm_vmw_get_3d_cap_arg *) data; | ||
72 | struct vmw_private *dev_priv = vmw_priv(dev); | 80 | struct vmw_private *dev_priv = vmw_priv(dev); |
73 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | 81 | uint32_t size; |
74 | struct drm_vmw_fifo_debug_arg *arg = | 82 | __le32 __iomem *fifo_mem; |
75 | (struct drm_vmw_fifo_debug_arg *)data; | 83 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); |
76 | __le32 __user *buffer = (__le32 __user *) | 84 | void *bounce; |
77 | (unsigned long)arg->debug_buffer; | 85 | int ret; |
78 | 86 | ||
79 | if (unlikely(fifo_state->last_buffer == NULL)) | 87 | if (unlikely(arg->pad64 != 0)) { |
88 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); | ||
80 | return -EINVAL; | 89 | return -EINVAL; |
90 | } | ||
91 | |||
92 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; | ||
93 | |||
94 | if (arg->max_size < size) | ||
95 | size = arg->max_size; | ||
96 | |||
97 | bounce = vmalloc(size); | ||
98 | if (unlikely(bounce == NULL)) { | ||
99 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); | ||
100 | return -ENOMEM; | ||
101 | } | ||
102 | |||
103 | fifo_mem = dev_priv->mmio_virt; | ||
104 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | ||
105 | |||
106 | ret = copy_to_user(buffer, bounce, size); | ||
107 | vfree(bounce); | ||
108 | |||
109 | if (unlikely(ret != 0)) | ||
110 | DRM_ERROR("Failed to report 3D caps info.\n"); | ||
111 | |||
112 | return ret; | ||
113 | } | ||
114 | |||
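vmw_get_cap_3d_ioctl() copies the capability block out of the FIFO MMIO window through a vmalloc'd bounce buffer: copy_to_user() may fault and cannot read from __iomem directly, so the device bytes are staged in ordinary kernel memory first. The shape of that pattern, reduced to a userspace sketch where plain memcpy() stands in for both memcpy_fromio() and copy_to_user():

#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Stage size bytes from a device window into a bounce buffer, then
 * hand them to the user buffer; the two copies never share pointers. */
static int read_caps(void *user_buf, const void *mmio, size_t size)
{
	void *bounce = malloc(size);

	if (!bounce)
		return -ENOMEM;

	memcpy(bounce, mmio, size);	/* stands in for memcpy_fromio() */
	memcpy(user_buf, bounce, size);	/* stands in for copy_to_user() */

	free(bounce);
	return 0;
}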
115 | int vmw_present_ioctl(struct drm_device *dev, void *data, | ||
116 | struct drm_file *file_priv) | ||
117 | { | ||
118 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
119 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
120 | struct drm_vmw_present_arg *arg = | ||
121 | (struct drm_vmw_present_arg *)data; | ||
122 | struct vmw_surface *surface; | ||
123 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
124 | struct drm_vmw_rect __user *clips_ptr; | ||
125 | struct drm_vmw_rect *clips = NULL; | ||
126 | struct drm_mode_object *obj; | ||
127 | struct vmw_framebuffer *vfb; | ||
128 | uint32_t num_clips; | ||
129 | int ret; | ||
130 | |||
131 | num_clips = arg->num_clips; | ||
132 | clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; | ||
133 | |||
134 | if (unlikely(num_clips == 0)) | ||
135 | return 0; | ||
136 | |||
137 | if (clips_ptr == NULL) { | ||
138 | DRM_ERROR("Argument clips_ptr must be specified.\n"); | ||
139 | ret = -EINVAL; | ||
140 | goto out_clips; | ||
141 | } | ||
142 | |||
143 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
144 | if (clips == NULL) { | ||
145 | DRM_ERROR("Failed to allocate clip rect list.\n"); | ||
146 | ret = -ENOMEM; | ||
147 | goto out_clips; | ||
148 | } | ||
149 | |||
150 | ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); | ||
151 | if (ret) { | ||
152 | DRM_ERROR("Failed to copy clip rects from userspace.\n"); | ||
153 | ret = -EFAULT; | ||
154 | goto out_no_copy; | ||
155 | } | ||
156 | |||
157 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
158 | if (unlikely(ret != 0)) { | ||
159 | ret = -ERESTARTSYS; | ||
160 | goto out_no_mode_mutex; | ||
161 | } | ||
162 | |||
163 | obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); | ||
164 | if (!obj) { | ||
165 | DRM_ERROR("Invalid framebuffer id.\n"); | ||
166 | ret = -EINVAL; | ||
167 | goto out_no_fb; | ||
168 | } | ||
169 | |||
170 | vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); | ||
171 | if (!vfb->dmabuf) { | ||
172 | DRM_ERROR("Framebuffer not dmabuf backed.\n"); | ||
173 | ret = -EINVAL; | ||
174 | goto out_no_fb; | ||
175 | } | ||
176 | |||
177 | ret = ttm_read_lock(&vmaster->lock, true); | ||
178 | if (unlikely(ret != 0)) | ||
179 | goto out_no_ttm_lock; | ||
180 | |||
181 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, | ||
182 | &surface); | ||
183 | if (ret) | ||
184 | goto out_no_surface; | ||
185 | |||
186 | ret = vmw_kms_present(dev_priv, file_priv, | ||
187 | vfb, surface, arg->sid, | ||
188 | arg->dest_x, arg->dest_y, | ||
189 | clips, num_clips); | ||
81 | 190 | ||
82 | if (arg->debug_buffer_size < fifo_state->last_data_size) { | 191 | /* vmw_user_surface_lookup_handle took a reference; drop it here */
83 | arg->used_size = arg->debug_buffer_size; | 192 | vmw_surface_unreference(&surface); |
84 | arg->did_not_fit = 1; | 193 | |
85 | } else { | 194 | out_no_surface: |
86 | arg->used_size = fifo_state->last_data_size; | 195 | ttm_read_unlock(&vmaster->lock); |
87 | arg->did_not_fit = 0; | 196 | out_no_ttm_lock: |
197 | out_no_fb: | ||
198 | mutex_unlock(&dev->mode_config.mutex); | ||
199 | out_no_mode_mutex: | ||
200 | out_no_copy: | ||
201 | kfree(clips); | ||
202 | out_clips: | ||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | ||
207 | struct drm_file *file_priv) | ||
208 | { | ||
209 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
210 | struct drm_vmw_present_readback_arg *arg = | ||
211 | (struct drm_vmw_present_readback_arg *)data; | ||
212 | struct drm_vmw_fence_rep __user *user_fence_rep = | ||
213 | (struct drm_vmw_fence_rep __user *) | ||
214 | (unsigned long)arg->fence_rep; | ||
215 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
216 | struct drm_vmw_rect __user *clips_ptr; | ||
217 | struct drm_vmw_rect *clips = NULL; | ||
218 | struct drm_mode_object *obj; | ||
219 | struct vmw_framebuffer *vfb; | ||
220 | uint32_t num_clips; | ||
221 | int ret; | ||
222 | |||
223 | num_clips = arg->num_clips; | ||
224 | clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; | ||
225 | |||
226 | if (unlikely(num_clips == 0)) | ||
227 | return 0; | ||
228 | |||
229 | if (clips_ptr == NULL) { | ||
230 | DRM_ERROR("Argument clips_ptr must be specified.\n"); | ||
231 | ret = -EINVAL; | ||
232 | goto out_clips; | ||
233 | } | ||
234 | |||
235 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
236 | if (clips == NULL) { | ||
237 | DRM_ERROR("Failed to allocate clip rect list.\n"); | ||
238 | ret = -ENOMEM; | ||
239 | goto out_clips; | ||
240 | } | ||
241 | |||
242 | ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); | ||
243 | if (ret) { | ||
244 | DRM_ERROR("Failed to copy clip rects from userspace.\n"); | ||
245 | ret = -EFAULT; | ||
246 | goto out_no_copy; | ||
247 | } | ||
248 | |||
249 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
250 | if (unlikely(ret != 0)) { | ||
251 | ret = -ERESTARTSYS; | ||
252 | goto out_no_mode_mutex; | ||
253 | } | ||
254 | |||
255 | obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); | ||
256 | if (!obj) { | ||
257 | DRM_ERROR("Invalid framebuffer id.\n"); | ||
258 | ret = -EINVAL; | ||
259 | goto out_no_fb; | ||
260 | } | ||
261 | |||
262 | vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); | ||
263 | if (!vfb->dmabuf) { | ||
264 | DRM_ERROR("Framebuffer not dmabuf backed.\n"); | ||
265 | ret = -EINVAL; | ||
266 | goto out_no_fb; | ||
88 | } | 267 | } |
89 | return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size); | 268 | |
269 | ret = ttm_read_lock(&vmaster->lock, true); | ||
270 | if (unlikely(ret != 0)) | ||
271 | goto out_no_ttm_lock; | ||
272 | |||
273 | ret = vmw_kms_readback(dev_priv, file_priv, | ||
274 | vfb, user_fence_rep, | ||
275 | clips, num_clips); | ||
276 | |||
277 | ttm_read_unlock(&vmaster->lock); | ||
278 | out_no_ttm_lock: | ||
279 | out_no_fb: | ||
280 | mutex_unlock(&dev->mode_config.mutex); | ||
281 | out_no_mode_mutex: | ||
282 | out_no_copy: | ||
283 | kfree(clips); | ||
284 | out_clips: | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | |||
289 | /** | ||
290 | * vmw_fops_poll - wrapper around the drm_poll function | ||
291 | * | ||
292 | * @filp: See the linux fops poll documentation. | ||
293 | * @wait: See the linux fops poll documentation. | ||
294 | * | ||
295 | * Wrapper around the drm_poll function that makes sure the device is | ||
296 | * processing the fifo if drm_poll decides to wait. | ||
297 | */ | ||
298 | unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait) | ||
299 | { | ||
300 | struct drm_file *file_priv = filp->private_data; | ||
301 | struct vmw_private *dev_priv = | ||
302 | vmw_priv(file_priv->minor->dev); | ||
303 | |||
304 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
305 | return drm_poll(filp, wait); | ||
306 | } | ||
307 | |||
308 | |||
309 | /** | ||
310 | * vmw_fops_read - wrapper around the drm_read function | ||
311 | * | ||
312 | * @filp: See the linux fops read documentation. | ||
313 | * @buffer: See the linux fops read documentation. | ||
314 | * @count: See the linux fops read documentation. | ||
315 | * @offset: See the linux fops read documentation. | ||
316 | * | ||
317 | * Wrapper around the drm_read function that makes sure the device is | ||
318 | * processing the fifo if drm_read decides to wait. | ||
319 | */ | ||
320 | ssize_t vmw_fops_read(struct file *filp, char __user *buffer, | ||
321 | size_t count, loff_t *offset) | ||
322 | { | ||
323 | struct drm_file *file_priv = filp->private_data; | ||
324 | struct vmw_private *dev_priv = | ||
325 | vmw_priv(file_priv->minor->dev); | ||
326 | |||
327 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
328 | return drm_read(filp, buffer, count, offset); | ||
90 | } | 329 | } |
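Both present ioctls above unwind through a single goto ladder, releasing resources in reverse order of acquisition so every exit path frees exactly what was taken and nothing more. The skeleton of that pattern, using heap allocations as placeholder resources:

#include <stdlib.h>
#include <errno.h>

/* Acquire a, then b; each failure jumps to the label that releases
 * only what has been acquired so far. */
static int ladder_example(void)
{
	void *a, *b;
	int ret;

	a = malloc(16);
	if (!a) {
		ret = -ENOMEM;
		goto out;
	}

	b = malloc(16);
	if (!b) {
		ret = -ENOMEM;
		goto out_free_a;
	}

	ret = 0;	/* the actual work would happen here */

	free(b);
out_free_a:
	free(a);
out:
	return ret;
}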
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index e92298a6a383..cabc95f7517e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -34,26 +34,33 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) | |||
34 | { | 34 | { |
35 | struct drm_device *dev = (struct drm_device *)arg; | 35 | struct drm_device *dev = (struct drm_device *)arg; |
36 | struct vmw_private *dev_priv = vmw_priv(dev); | 36 | struct vmw_private *dev_priv = vmw_priv(dev); |
37 | uint32_t status; | 37 | uint32_t status, masked_status; |
38 | 38 | ||
39 | spin_lock(&dev_priv->irq_lock); | 39 | spin_lock(&dev_priv->irq_lock); |
40 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 40 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
41 | masked_status = status & dev_priv->irq_mask; | ||
41 | spin_unlock(&dev_priv->irq_lock); | 42 | spin_unlock(&dev_priv->irq_lock); |
42 | 43 | ||
43 | if (status & SVGA_IRQFLAG_ANY_FENCE) | 44 | if (likely(status)) |
45 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
46 | |||
47 | if (!masked_status) | ||
48 | return IRQ_NONE; | ||
49 | |||
50 | if (masked_status & (SVGA_IRQFLAG_ANY_FENCE | | ||
51 | SVGA_IRQFLAG_FENCE_GOAL)) { | ||
52 | vmw_fences_update(dev_priv->fman); | ||
44 | wake_up_all(&dev_priv->fence_queue); | 53 | wake_up_all(&dev_priv->fence_queue); |
45 | if (status & SVGA_IRQFLAG_FIFO_PROGRESS) | 54 | } |
55 | |||
56 | if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) | ||
46 | wake_up_all(&dev_priv->fifo_queue); | 57 | wake_up_all(&dev_priv->fifo_queue); |
47 | 58 | ||
48 | if (likely(status)) { | ||
49 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
50 | return IRQ_HANDLED; | ||
51 | } | ||
52 | 59 | ||
53 | return IRQ_NONE; | 60 | return IRQ_HANDLED; |
54 | } | 61 | } |
55 | 62 | ||
56 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) | 63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) |
57 | { | 64 | { |
58 | uint32_t busy; | 65 | uint32_t busy; |
59 | 66 | ||
@@ -64,43 +71,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) | |||
64 | return (busy == 0); | 71 | return (busy == 0); |
65 | } | 72 | } |
66 | 73 | ||
67 | void vmw_update_sequence(struct vmw_private *dev_priv, | 74 | void vmw_update_seqno(struct vmw_private *dev_priv, |
68 | struct vmw_fifo_state *fifo_state) | 75 | struct vmw_fifo_state *fifo_state) |
69 | { | 76 | { |
70 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 77 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
78 | uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
71 | 79 | ||
72 | uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | 80 | if (dev_priv->last_read_seqno != seqno) { |
73 | 81 | dev_priv->last_read_seqno = seqno; | |
74 | if (dev_priv->last_read_sequence != sequence) { | 82 | vmw_marker_pull(&fifo_state->marker_queue, seqno); |
75 | dev_priv->last_read_sequence = sequence; | 83 | vmw_fences_update(dev_priv->fman); |
76 | vmw_fence_pull(&fifo_state->fence_queue, sequence); | ||
77 | } | 84 | } |
78 | } | 85 | } |
79 | 86 | ||
80 | bool vmw_fence_signaled(struct vmw_private *dev_priv, | 87 | bool vmw_seqno_passed(struct vmw_private *dev_priv, |
81 | uint32_t sequence) | 88 | uint32_t seqno) |
82 | { | 89 | { |
83 | struct vmw_fifo_state *fifo_state; | 90 | struct vmw_fifo_state *fifo_state; |
84 | bool ret; | 91 | bool ret; |
85 | 92 | ||
86 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 93 | if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) |
87 | return true; | 94 | return true; |
88 | 95 | ||
89 | fifo_state = &dev_priv->fifo; | 96 | fifo_state = &dev_priv->fifo; |
90 | vmw_update_sequence(dev_priv, fifo_state); | 97 | vmw_update_seqno(dev_priv, fifo_state); |
91 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 98 | if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) |
92 | return true; | 99 | return true; |
93 | 100 | ||
94 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && | 101 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && |
95 | vmw_fifo_idle(dev_priv, sequence)) | 102 | vmw_fifo_idle(dev_priv, seqno)) |
96 | return true; | 103 | return true; |
97 | 104 | ||
98 | /** | 105 | /** |
99 | * Then check if the sequence is higher than what we've actually | 106 | * Then check if the seqno is higher than what we've actually |
100 | * emitted. Then the fence is stale and signaled. | 107 | * emitted. Then the fence is stale and signaled. |
101 | */ | 108 | */ |
102 | 109 | ||
103 | ret = ((atomic_read(&dev_priv->fence_seq) - sequence) | 110 | ret = ((atomic_read(&dev_priv->marker_seq) - seqno) |
104 | > VMW_FENCE_WRAP); | 111 | > VMW_FENCE_WRAP); |
105 | 112 | ||
106 | return ret; | 113 | return ret; |
@@ -109,7 +116,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, | |||
109 | int vmw_fallback_wait(struct vmw_private *dev_priv, | 116 | int vmw_fallback_wait(struct vmw_private *dev_priv, |
110 | bool lazy, | 117 | bool lazy, |
111 | bool fifo_idle, | 118 | bool fifo_idle, |
112 | uint32_t sequence, | 119 | uint32_t seqno, |
113 | bool interruptible, | 120 | bool interruptible, |
114 | unsigned long timeout) | 121 | unsigned long timeout) |
115 | { | 122 | { |
@@ -123,7 +130,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
123 | DEFINE_WAIT(__wait); | 130 | DEFINE_WAIT(__wait); |
124 | 131 | ||
125 | wait_condition = (fifo_idle) ? &vmw_fifo_idle : | 132 | wait_condition = (fifo_idle) ? &vmw_fifo_idle : |
126 | &vmw_fence_signaled; | 133 | &vmw_seqno_passed; |
127 | 134 | ||
128 | /** | 135 | /** |
129 | * Block command submission while waiting for idle. | 136 | * Block command submission while waiting for idle. |
@@ -131,14 +138,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
131 | 138 | ||
132 | if (fifo_idle) | 139 | if (fifo_idle) |
133 | down_read(&fifo_state->rwsem); | 140 | down_read(&fifo_state->rwsem); |
134 | signal_seq = atomic_read(&dev_priv->fence_seq); | 141 | signal_seq = atomic_read(&dev_priv->marker_seq); |
135 | ret = 0; | 142 | ret = 0; |
136 | 143 | ||
137 | for (;;) { | 144 | for (;;) { |
138 | prepare_to_wait(&dev_priv->fence_queue, &__wait, | 145 | prepare_to_wait(&dev_priv->fence_queue, &__wait, |
139 | (interruptible) ? | 146 | (interruptible) ? |
140 | TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | 147 | TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); |
141 | if (wait_condition(dev_priv, sequence)) | 148 | if (wait_condition(dev_priv, seqno)) |
142 | break; | 149 | break; |
143 | if (time_after_eq(jiffies, end_jiffies)) { | 150 | if (time_after_eq(jiffies, end_jiffies)) { |
144 | DRM_ERROR("SVGA device lockup.\n"); | 151 | DRM_ERROR("SVGA device lockup.\n"); |
@@ -175,68 +182,110 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
175 | return ret; | 182 | return ret; |
176 | } | 183 | } |
177 | 184 | ||
178 | int vmw_wait_fence(struct vmw_private *dev_priv, | 185 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) |
179 | bool lazy, uint32_t sequence, | 186 | { |
180 | bool interruptible, unsigned long timeout) | 187 | mutex_lock(&dev_priv->hw_mutex); |
188 | if (dev_priv->fence_queue_waiters++ == 0) { | ||
189 | unsigned long irq_flags; | ||
190 | |||
191 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
192 | outl(SVGA_IRQFLAG_ANY_FENCE, | ||
193 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
194 | dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE; | ||
195 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | ||
196 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
197 | } | ||
198 | mutex_unlock(&dev_priv->hw_mutex); | ||
199 | } | ||
200 | |||
201 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | ||
202 | { | ||
203 | mutex_lock(&dev_priv->hw_mutex); | ||
204 | if (--dev_priv->fence_queue_waiters == 0) { | ||
205 | unsigned long irq_flags; | ||
206 | |||
207 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
208 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE; | ||
209 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | ||
210 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
211 | } | ||
212 | mutex_unlock(&dev_priv->hw_mutex); | ||
213 | } | ||
214 | |||
215 | |||
216 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) | ||
217 | { | ||
218 | mutex_lock(&dev_priv->hw_mutex); | ||
219 | if (dev_priv->goal_queue_waiters++ == 0) { | ||
220 | unsigned long irq_flags; | ||
221 | |||
222 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
223 | outl(SVGA_IRQFLAG_FENCE_GOAL, | ||
224 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
225 | dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL; | ||
226 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | ||
227 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
228 | } | ||
229 | mutex_unlock(&dev_priv->hw_mutex); | ||
230 | } | ||
231 | |||
232 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | ||
233 | { | ||
234 | mutex_lock(&dev_priv->hw_mutex); | ||
235 | if (--dev_priv->goal_queue_waiters == 0) { | ||
236 | unsigned long irq_flags; | ||
237 | |||
238 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
239 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL; | ||
240 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | ||
241 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
242 | } | ||
243 | mutex_unlock(&dev_priv->hw_mutex); | ||
244 | } | ||
245 | |||
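The four waiter helpers above share one shape: a reference count guarded by hw_mutex, with the device IRQ mask written only on the 0-to-1 and 1-to-0 transitions so concurrent waiters never toggle the hardware redundantly. The gating logic in isolation, with hypothetical enable/disable hooks in place of the SVGA register writes:

#include <pthread.h>

static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int waiters;

static void hw_irq_enable(void)  { /* would set the mask bit */ }
static void hw_irq_disable(void) { /* would clear the mask bit */ }

/* The first waiter enables the interrupt source... */
static void waiter_add(void)
{
	pthread_mutex_lock(&hw_mutex);
	if (waiters++ == 0)
		hw_irq_enable();
	pthread_mutex_unlock(&hw_mutex);
}

/* ...and the last waiter disables it again. */
static void waiter_remove(void)
{
	pthread_mutex_lock(&hw_mutex);
	if (--waiters == 0)
		hw_irq_disable();
	pthread_mutex_unlock(&hw_mutex);
}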
246 | int vmw_wait_seqno(struct vmw_private *dev_priv, | ||
247 | bool lazy, uint32_t seqno, | ||
248 | bool interruptible, unsigned long timeout) | ||
181 | { | 249 | { |
182 | long ret; | 250 | long ret; |
183 | unsigned long irq_flags; | ||
184 | struct vmw_fifo_state *fifo = &dev_priv->fifo; | 251 | struct vmw_fifo_state *fifo = &dev_priv->fifo; |
185 | 252 | ||
186 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 253 | if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) |
187 | return 0; | 254 | return 0; |
188 | 255 | ||
189 | if (likely(vmw_fence_signaled(dev_priv, sequence))) | 256 | if (likely(vmw_seqno_passed(dev_priv, seqno))) |
190 | return 0; | 257 | return 0; |
191 | 258 | ||
192 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | 259 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); |
193 | 260 | ||
194 | if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) | 261 | if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) |
195 | return vmw_fallback_wait(dev_priv, lazy, true, sequence, | 262 | return vmw_fallback_wait(dev_priv, lazy, true, seqno, |
196 | interruptible, timeout); | 263 | interruptible, timeout); |
197 | 264 | ||
198 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | 265 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) |
199 | return vmw_fallback_wait(dev_priv, lazy, false, sequence, | 266 | return vmw_fallback_wait(dev_priv, lazy, false, seqno, |
200 | interruptible, timeout); | 267 | interruptible, timeout); |
201 | 268 | ||
202 | mutex_lock(&dev_priv->hw_mutex); | 269 | vmw_seqno_waiter_add(dev_priv); |
203 | if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) { | ||
204 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
205 | outl(SVGA_IRQFLAG_ANY_FENCE, | ||
206 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
207 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
208 | vmw_read(dev_priv, SVGA_REG_IRQMASK) | | ||
209 | SVGA_IRQFLAG_ANY_FENCE); | ||
210 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
211 | } | ||
212 | mutex_unlock(&dev_priv->hw_mutex); | ||
213 | 270 | ||
214 | if (interruptible) | 271 | if (interruptible) |
215 | ret = wait_event_interruptible_timeout | 272 | ret = wait_event_interruptible_timeout |
216 | (dev_priv->fence_queue, | 273 | (dev_priv->fence_queue, |
217 | vmw_fence_signaled(dev_priv, sequence), | 274 | vmw_seqno_passed(dev_priv, seqno), |
218 | timeout); | 275 | timeout); |
219 | else | 276 | else |
220 | ret = wait_event_timeout | 277 | ret = wait_event_timeout |
221 | (dev_priv->fence_queue, | 278 | (dev_priv->fence_queue, |
222 | vmw_fence_signaled(dev_priv, sequence), | 279 | vmw_seqno_passed(dev_priv, seqno), |
223 | timeout); | 280 | timeout); |
224 | 281 | ||
282 | vmw_seqno_waiter_remove(dev_priv); | ||
283 | |||
225 | if (unlikely(ret == 0)) | 284 | if (unlikely(ret == 0)) |
226 | ret = -EBUSY; | 285 | ret = -EBUSY; |
227 | else if (likely(ret > 0)) | 286 | else if (likely(ret > 0)) |
228 | ret = 0; | 287 | ret = 0; |
229 | 288 | ||
230 | mutex_lock(&dev_priv->hw_mutex); | ||
231 | if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) { | ||
232 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
233 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
234 | vmw_read(dev_priv, SVGA_REG_IRQMASK) & | ||
235 | ~SVGA_IRQFLAG_ANY_FENCE); | ||
236 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
237 | } | ||
238 | mutex_unlock(&dev_priv->hw_mutex); | ||
239 | |||
240 | return ret; | 289 | return ret; |
241 | } | 290 | } |
242 | 291 | ||
@@ -273,25 +322,3 @@ void vmw_irq_uninstall(struct drm_device *dev) | |||
273 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 322 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
274 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 323 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
275 | } | 324 | } |
276 | |||
277 | #define VMW_FENCE_WAIT_TIMEOUT 3*HZ; | ||
278 | |||
279 | int vmw_fence_wait_ioctl(struct drm_device *dev, void *data, | ||
280 | struct drm_file *file_priv) | ||
281 | { | ||
282 | struct drm_vmw_fence_wait_arg *arg = | ||
283 | (struct drm_vmw_fence_wait_arg *)data; | ||
284 | unsigned long timeout; | ||
285 | |||
286 | if (!arg->cookie_valid) { | ||
287 | arg->cookie_valid = 1; | ||
288 | arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT; | ||
289 | } | ||
290 | |||
291 | timeout = jiffies; | ||
292 | if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) | ||
293 | return -EBUSY; | ||
294 | |||
295 | timeout = (unsigned long)arg->kernel_cookie - timeout; | ||
296 | return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout); | ||
297 | } | ||
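The seqno checks in this file lean on unsigned wraparound: last_read_seqno - seqno < VMW_FENCE_WRAP stays true across the 32-bit rollover as long as the two values lie within the wrap window of each other. A standalone demonstration of that comparison; the window size below is illustrative, the driver's actual value lives in vmwgfx_drv.h:

#include <stdint.h>
#include <stdio.h>

#define FENCE_WRAP (1u << 24)	/* illustrative window size */

/* True if seqno has been reached, tolerating 32-bit rollover. */
static int seqno_passed(uint32_t last_read, uint32_t seqno)
{
	return (uint32_t)(last_read - seqno) < FENCE_WRAP;
}

int main(void)
{
	/* Across the rollover: 5 - 0xfffffffe wraps to 7, so the
	 * seqno counts as passed; the reverse does not. */
	printf("%d\n", seqno_passed(5u, 0xfffffffeu));	/* prints 1 */
	printf("%d\n", seqno_passed(0xfffffffeu, 5u));	/* prints 0 */
	return 0;
}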
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index dfe32e62bd90..8b14dfd513a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -27,12 +27,10 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | |||
30 | /* Might need a hrtimer here? */ | 31 | /* Might need a hrtimer here? */ |
31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | 32 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) |
32 | 33 | ||
33 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); | ||
34 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); | ||
35 | |||
36 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) | 34 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) |
37 | { | 35 | { |
38 | if (du->cursor_surface) | 36 | if (du->cursor_surface) |
@@ -329,41 +327,10 @@ struct vmw_framebuffer_surface { | |||
329 | struct vmw_framebuffer base; | 327 | struct vmw_framebuffer base; |
330 | struct vmw_surface *surface; | 328 | struct vmw_surface *surface; |
331 | struct vmw_dma_buffer *buffer; | 329 | struct vmw_dma_buffer *buffer; |
332 | struct delayed_work d_work; | ||
333 | struct mutex work_lock; | ||
334 | bool present_fs; | ||
335 | struct list_head head; | 330 | struct list_head head; |
336 | struct drm_master *master; | 331 | struct drm_master *master; |
337 | }; | 332 | }; |
338 | 333 | ||
339 | /** | ||
340 | * vmw_kms_idle_workqueues - Flush workqueues on this master | ||
341 | * | ||
342 | * @vmaster - Pointer identifying the master, for the surfaces of which | ||
343 | * we idle the dirty work queues. | ||
344 | * | ||
345 | * This function should be called with the ttm lock held in exclusive mode | ||
346 | * to idle all dirty work queues before the fifo is taken down. | ||
347 | * | ||
348 | * The work task may actually requeue itself, but after the flush returns we're | ||
349 | * sure that there's nothing to present, since the ttm lock is held in | ||
350 | * exclusive mode, so the fifo will never get used. | ||
351 | */ | ||
352 | |||
353 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster) | ||
354 | { | ||
355 | struct vmw_framebuffer_surface *entry; | ||
356 | |||
357 | mutex_lock(&vmaster->fb_surf_mutex); | ||
358 | list_for_each_entry(entry, &vmaster->fb_surf, head) { | ||
359 | if (cancel_delayed_work_sync(&entry->d_work)) | ||
360 | (void) entry->d_work.work.func(&entry->d_work.work); | ||
361 | |||
362 | (void) cancel_delayed_work_sync(&entry->d_work); | ||
363 | } | ||
364 | mutex_unlock(&vmaster->fb_surf_mutex); | ||
365 | } | ||
366 | |||
367 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | 334 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) |
368 | { | 335 | { |
369 | struct vmw_framebuffer_surface *vfbs = | 336 | struct vmw_framebuffer_surface *vfbs = |
@@ -375,64 +342,127 @@ void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | |||
375 | list_del(&vfbs->head); | 342 | list_del(&vfbs->head); |
376 | mutex_unlock(&vmaster->fb_surf_mutex); | 343 | mutex_unlock(&vmaster->fb_surf_mutex); |
377 | 344 | ||
378 | cancel_delayed_work_sync(&vfbs->d_work); | ||
379 | drm_master_put(&vfbs->master); | 345 | drm_master_put(&vfbs->master); |
380 | drm_framebuffer_cleanup(framebuffer); | 346 | drm_framebuffer_cleanup(framebuffer); |
381 | vmw_surface_unreference(&vfbs->surface); | 347 | vmw_surface_unreference(&vfbs->surface); |
348 | ttm_base_object_unref(&vfbs->base.user_obj); | ||
382 | 349 | ||
383 | kfree(vfbs); | 350 | kfree(vfbs); |
384 | } | 351 | } |
385 | 352 | ||
386 | static void vmw_framebuffer_present_fs_callback(struct work_struct *work) | 353 | static int do_surface_dirty_sou(struct vmw_private *dev_priv, |
354 | struct drm_file *file_priv, | ||
355 | struct vmw_framebuffer *framebuffer, | ||
356 | unsigned flags, unsigned color, | ||
357 | struct drm_clip_rect *clips, | ||
358 | unsigned num_clips, int inc) | ||
387 | { | 359 | { |
388 | struct delayed_work *d_work = | 360 | struct drm_clip_rect *clips_ptr; |
389 | container_of(work, struct delayed_work, work); | 361 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; |
390 | struct vmw_framebuffer_surface *vfbs = | 362 | struct drm_crtc *crtc; |
391 | container_of(d_work, struct vmw_framebuffer_surface, d_work); | 363 | size_t fifo_size; |
392 | struct vmw_surface *surf = vfbs->surface; | 364 | int i, num_units; |
393 | struct drm_framebuffer *framebuffer = &vfbs->base.base; | 365 | int ret = 0; /* silence warning */ |
394 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 366 | int left, right, top, bottom; |
395 | 367 | ||
396 | struct { | 368 | struct { |
397 | SVGA3dCmdHeader header; | 369 | SVGA3dCmdHeader header; |
398 | SVGA3dCmdPresent body; | 370 | SVGA3dCmdBlitSurfaceToScreen body; |
399 | SVGA3dCopyRect cr; | ||
400 | } *cmd; | 371 | } *cmd; |
372 | SVGASignedRect *blits; | ||
401 | 373 | ||
402 | /** | ||
403 | * Strictly we should take the ttm_lock in read mode before accessing | ||
404 | * the fifo, to make sure the fifo is present and up. However, | ||
405 | * instead we flush all workqueues under the ttm lock in exclusive mode | ||
406 | * before taking down the fifo. | ||
407 | */ | ||
408 | mutex_lock(&vfbs->work_lock); | ||
409 | if (!vfbs->present_fs) | ||
410 | goto out_unlock; | ||
411 | |||
412 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
413 | if (unlikely(cmd == NULL)) | ||
414 | goto out_resched; | ||
415 | |||
416 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
417 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr)); | ||
418 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
419 | cmd->cr.x = cpu_to_le32(0); | ||
420 | cmd->cr.y = cpu_to_le32(0); | ||
421 | cmd->cr.srcx = cmd->cr.x; | ||
422 | cmd->cr.srcy = cmd->cr.y; | ||
423 | cmd->cr.w = cpu_to_le32(framebuffer->width); | ||
424 | cmd->cr.h = cpu_to_le32(framebuffer->height); | ||
425 | vfbs->present_fs = false; | ||
426 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
427 | out_resched: | ||
428 | /** | ||
429 | * Will not re-add if already pending. | ||
430 | */ | ||
431 | schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
432 | out_unlock: | ||
433 | mutex_unlock(&vfbs->work_lock); | ||
434 | } | ||
435 | 374 | ||
375 | num_units = 0; | ||
376 | list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, | ||
377 | head) { | ||
378 | if (crtc->fb != &framebuffer->base) | ||
379 | continue; | ||
380 | units[num_units++] = vmw_crtc_to_du(crtc); | ||
381 | } | ||
382 | |||
383 | BUG_ON(!clips || !num_clips); | ||
384 | |||
385 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; | ||
386 | cmd = kzalloc(fifo_size, GFP_KERNEL); | ||
387 | if (unlikely(cmd == NULL)) { | ||
388 | DRM_ERROR("Temporary fifo memory alloc failed.\n"); | ||
389 | return -ENOMEM; | ||
390 | } | ||
391 | |||
392 | left = clips->x1; | ||
393 | right = clips->x2; | ||
394 | top = clips->y1; | ||
395 | bottom = clips->y2; | ||
396 | |||
397 | clips_ptr = clips; | ||
398 | for (i = 1; i < num_clips; i++, clips_ptr += inc) { | ||
399 | left = min_t(int, left, (int)clips_ptr->x1); | ||
400 | right = max_t(int, right, (int)clips_ptr->x2); | ||
401 | top = min_t(int, top, (int)clips_ptr->y1); | ||
402 | bottom = max_t(int, bottom, (int)clips_ptr->y2); | ||
403 | } | ||
404 | |||
405 | /* only need to do this once */ | ||
406 | memset(cmd, 0, fifo_size); | ||
407 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); | ||
408 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); | ||
409 | |||
410 | cmd->body.srcRect.left = left; | ||
411 | cmd->body.srcRect.right = right; | ||
412 | cmd->body.srcRect.top = top; | ||
413 | cmd->body.srcRect.bottom = bottom; | ||
414 | |||
415 | clips_ptr = clips; | ||
416 | blits = (SVGASignedRect *)&cmd[1]; | ||
417 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { | ||
418 | blits[i].left = clips_ptr->x1 - left; | ||
419 | blits[i].right = clips_ptr->x2 - left; | ||
420 | blits[i].top = clips_ptr->y1 - top; | ||
421 | blits[i].bottom = clips_ptr->y2 - top; | ||
422 | } | ||
423 | |||
424 | /* write per unit, reusing the command buffer for each crtc */ | ||
425 | for (i = 0; i < num_units; i++) { | ||
426 | struct vmw_display_unit *unit = units[i]; | ||
427 | int clip_x1 = left - unit->crtc.x; | ||
428 | int clip_y1 = top - unit->crtc.y; | ||
429 | int clip_x2 = right - unit->crtc.x; | ||
430 | int clip_y2 = bottom - unit->crtc.y; | ||
431 | |||
432 | /* skip any crtcs that misses the clip region */ | ||
433 | if (clip_x1 >= unit->crtc.mode.hdisplay || | ||
434 | clip_y1 >= unit->crtc.mode.vdisplay || | ||
435 | clip_x2 <= 0 || clip_y2 <= 0) | ||
436 | continue; | ||
437 | |||
438 | /* need to reset sid as it is changed by execbuf */ | ||
439 | cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle); | ||
440 | |||
441 | cmd->body.destScreenId = unit->unit; | ||
442 | |||
443 | /* | ||
444 | * The blit command is a lot more resilient than the | ||
445 | * readback command when it comes to clip rects. So it's | ||
446 | * okay to go out of bounds. | ||
447 | */ | ||
448 | |||
449 | cmd->body.destRect.left = clip_x1; | ||
450 | cmd->body.destRect.right = clip_x2; | ||
451 | cmd->body.destRect.top = clip_y1; | ||
452 | cmd->body.destRect.bottom = clip_y2; | ||
453 | |||
454 | |||
455 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | ||
456 | fifo_size, 0, NULL); | ||
457 | |||
458 | if (unlikely(ret != 0)) | ||
459 | break; | ||
460 | } | ||
461 | |||
462 | kfree(cmd); | ||
463 | |||
464 | return ret; | ||
465 | } | ||
436 | 466 | ||
437 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | 467 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, |
438 | struct drm_file *file_priv, | 468 | struct drm_file *file_priv, |
@@ -444,44 +474,20 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
444 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 474 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
445 | struct vmw_framebuffer_surface *vfbs = | 475 | struct vmw_framebuffer_surface *vfbs = |
446 | vmw_framebuffer_to_vfbs(framebuffer); | 476 | vmw_framebuffer_to_vfbs(framebuffer); |
447 | struct vmw_surface *surf = vfbs->surface; | ||
448 | struct drm_clip_rect norect; | 477 | struct drm_clip_rect norect; |
449 | SVGA3dCopyRect *cr; | 478 | int ret, inc = 1; |
450 | int i, inc = 1; | ||
451 | int ret; | ||
452 | |||
453 | struct { | ||
454 | SVGA3dCmdHeader header; | ||
455 | SVGA3dCmdPresent body; | ||
456 | SVGA3dCopyRect cr; | ||
457 | } *cmd; | ||
458 | 479 | ||
459 | if (unlikely(vfbs->master != file_priv->master)) | 480 | if (unlikely(vfbs->master != file_priv->master)) |
460 | return -EINVAL; | 481 | return -EINVAL; |
461 | 482 | ||
483 | /* Require ScreenObject support for 3D */ | ||
484 | if (!dev_priv->sou_priv) | ||
485 | return -EINVAL; | ||
486 | |||
462 | ret = ttm_read_lock(&vmaster->lock, true); | 487 | ret = ttm_read_lock(&vmaster->lock, true); |
463 | if (unlikely(ret != 0)) | 488 | if (unlikely(ret != 0)) |
464 | return ret; | 489 | return ret; |
465 | 490 | ||
466 | if (!num_clips || | ||
467 | !(dev_priv->fifo.capabilities & | ||
468 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
469 | int ret; | ||
470 | |||
471 | mutex_lock(&vfbs->work_lock); | ||
472 | vfbs->present_fs = true; | ||
473 | ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
474 | mutex_unlock(&vfbs->work_lock); | ||
475 | if (ret) { | ||
476 | /** | ||
477 | * No work pending, Force immediate present. | ||
478 | */ | ||
479 | vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); | ||
480 | } | ||
481 | ttm_read_unlock(&vmaster->lock); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | if (!num_clips) { | 491 | if (!num_clips) { |
486 | num_clips = 1; | 492 | num_clips = 1; |
487 | clips = &norect; | 493 | clips = &norect; |
@@ -493,29 +499,10 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
493 | inc = 2; /* skip source rects */ | 499 | inc = 2; /* skip source rects */ |
494 | } | 500 | } |
495 | 501 | ||
496 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | 502 | ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, |
497 | if (unlikely(cmd == NULL)) { | 503 | flags, color, |
498 | DRM_ERROR("Fifo reserve failed.\n"); | 504 | clips, num_clips, inc); |
499 | ttm_read_unlock(&vmaster->lock); | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
503 | memset(cmd, 0, sizeof(*cmd)); | ||
504 | |||
505 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
506 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr)); | ||
507 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
508 | |||
509 | for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) { | ||
510 | cr->x = cpu_to_le16(clips->x1); | ||
511 | cr->y = cpu_to_le16(clips->y1); | ||
512 | cr->srcx = cr->x; | ||
513 | cr->srcy = cr->y; | ||
514 | cr->w = cpu_to_le16(clips->x2 - clips->x1); | ||
515 | cr->h = cpu_to_le16(clips->y2 - clips->y1); | ||
516 | } | ||
517 | 505 | ||
518 | vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | ||
519 | ttm_read_unlock(&vmaster->lock); | 506 | ttm_read_unlock(&vmaster->lock); |
520 | return 0; | 507 | return 0; |
521 | } | 508 | } |
@@ -540,6 +527,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
540 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 527 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
541 | int ret; | 528 | int ret; |
542 | 529 | ||
530 | /* 3D is only supported on HWv8 hosts, which support screen objects */ | ||
531 | if (!dev_priv->sou_priv) | ||
532 | return -ENOSYS; | ||
533 | |||
543 | /* | 534 | /* |
544 | * Sanity checks. | 535 | * Sanity checks. |
545 | */ | 536 | */ |
@@ -567,6 +558,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
567 | case 15: | 558 | case 15: |
568 | format = SVGA3D_A1R5G5B5; | 559 | format = SVGA3D_A1R5G5B5; |
569 | break; | 560 | break; |
561 | case 8: | ||
562 | format = SVGA3D_LUMINANCE8; | ||
563 | break; | ||
570 | default: | 564 | default: |
571 | DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); | 565 | DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); |
572 | return -EINVAL; | 566 | return -EINVAL; |
@@ -599,14 +593,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
599 | vfbs->base.base.depth = mode_cmd->depth; | 593 | vfbs->base.base.depth = mode_cmd->depth; |
600 | vfbs->base.base.width = mode_cmd->width; | 594 | vfbs->base.base.width = mode_cmd->width; |
601 | vfbs->base.base.height = mode_cmd->height; | 595 | vfbs->base.base.height = mode_cmd->height; |
602 | vfbs->base.pin = &vmw_surface_dmabuf_pin; | ||
603 | vfbs->base.unpin = &vmw_surface_dmabuf_unpin; | ||
604 | vfbs->surface = surface; | 596 | vfbs->surface = surface; |
597 | vfbs->base.user_handle = mode_cmd->handle; | ||
605 | vfbs->master = drm_master_get(file_priv->master); | 598 | vfbs->master = drm_master_get(file_priv->master); |
606 | mutex_init(&vfbs->work_lock); | ||
607 | 599 | ||
608 | mutex_lock(&vmaster->fb_surf_mutex); | 600 | mutex_lock(&vmaster->fb_surf_mutex); |
609 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | ||
610 | list_add_tail(&vfbs->head, &vmaster->fb_surf); | 601 | list_add_tail(&vfbs->head, &vmaster->fb_surf); |
611 | mutex_unlock(&vmaster->fb_surf_mutex); | 602 | mutex_unlock(&vmaster->fb_surf_mutex); |
612 | 603 | ||
@@ -641,48 +632,33 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | |||
641 | 632 | ||
642 | drm_framebuffer_cleanup(framebuffer); | 633 | drm_framebuffer_cleanup(framebuffer); |
643 | vmw_dmabuf_unreference(&vfbd->buffer); | 634 | vmw_dmabuf_unreference(&vfbd->buffer); |
635 | ttm_base_object_unref(&vfbd->base.user_obj); | ||
644 | 636 | ||
645 | kfree(vfbd); | 637 | kfree(vfbd); |
646 | } | 638 | } |
647 | 639 | ||
648 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | 640 | static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv, |
649 | struct drm_file *file_priv, | 641 | struct vmw_framebuffer *framebuffer, |
650 | unsigned flags, unsigned color, | 642 | unsigned flags, unsigned color, |
651 | struct drm_clip_rect *clips, | 643 | struct drm_clip_rect *clips, |
652 | unsigned num_clips) | 644 | unsigned num_clips, int increment) |
653 | { | 645 | { |
654 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 646 | size_t fifo_size; |
655 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 647 | int i; |
656 | struct drm_clip_rect norect; | 648 | |
657 | int ret; | ||
658 | struct { | 649 | struct { |
659 | uint32_t header; | 650 | uint32_t header; |
660 | SVGAFifoCmdUpdate body; | 651 | SVGAFifoCmdUpdate body; |
661 | } *cmd; | 652 | } *cmd; |
662 | int i, increment = 1; | ||
663 | |||
664 | ret = ttm_read_lock(&vmaster->lock, true); | ||
665 | if (unlikely(ret != 0)) | ||
666 | return ret; | ||
667 | |||
668 | if (!num_clips) { | ||
669 | num_clips = 1; | ||
670 | clips = &norect; | ||
671 | norect.x1 = norect.y1 = 0; | ||
672 | norect.x2 = framebuffer->width; | ||
673 | norect.y2 = framebuffer->height; | ||
674 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
675 | num_clips /= 2; | ||
676 | increment = 2; | ||
677 | } | ||
678 | 653 | ||
679 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); | 654 | fifo_size = sizeof(*cmd) * num_clips; |
655 | cmd = vmw_fifo_reserve(dev_priv, fifo_size); | ||
680 | if (unlikely(cmd == NULL)) { | 656 | if (unlikely(cmd == NULL)) { |
681 | DRM_ERROR("Fifo reserve failed.\n"); | 657 | DRM_ERROR("Fifo reserve failed.\n"); |
682 | ttm_read_unlock(&vmaster->lock); | ||
683 | return -ENOMEM; | 658 | return -ENOMEM; |
684 | } | 659 | } |
685 | 660 | ||
661 | memset(cmd, 0, fifo_size); | ||
686 | for (i = 0; i < num_clips; i++, clips += increment) { | 662 | for (i = 0; i < num_clips; i++, clips += increment) { |
687 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); | 663 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); |
688 | cmd[i].body.x = cpu_to_le32(clips->x1); | 664 | cmd[i].body.x = cpu_to_le32(clips->x1); |
@@ -691,57 +667,186 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
691 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); | 667 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); |
692 | } | 668 | } |
693 | 669 | ||
694 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | 670 | vmw_fifo_commit(dev_priv, fifo_size); |
695 | ttm_read_unlock(&vmaster->lock); | ||
696 | |||
697 | return 0; | 671 | return 0; |
698 | } | 672 | } |
699 | 673 | ||
700 | static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | 674 | static int do_dmabuf_define_gmrfb(struct drm_file *file_priv, |
701 | .destroy = vmw_framebuffer_dmabuf_destroy, | 675 | struct vmw_private *dev_priv, |
702 | .dirty = vmw_framebuffer_dmabuf_dirty, | 676 | struct vmw_framebuffer *framebuffer) |
703 | .create_handle = vmw_framebuffer_create_handle, | ||
704 | }; | ||
705 | |||
706 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | ||
707 | { | 677 | { |
708 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | 678 | int depth = framebuffer->base.depth; |
709 | struct vmw_framebuffer_surface *vfbs = | 679 | size_t fifo_size; |
710 | vmw_framebuffer_to_vfbs(&vfb->base); | ||
711 | unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; | ||
712 | int ret; | 680 | int ret; |
713 | 681 | ||
714 | vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); | 682 | struct { |
715 | if (unlikely(vfbs->buffer == NULL)) | 683 | uint32_t header; |
684 | SVGAFifoCmdDefineGMRFB body; | ||
685 | } *cmd; | ||
686 | |||
687 | /* Emulate RGBA support: contrary to svga_reg.h, hosts do not | ||
688 | * support it. This only matters if we later read the value back | ||
689 | * and expect to get what we uploaded. | ||
690 | */ | ||
691 | if (depth == 32) | ||
692 | depth = 24; | ||
693 | |||
694 | fifo_size = sizeof(*cmd); | ||
695 | cmd = kmalloc(fifo_size, GFP_KERNEL); | ||
696 | if (unlikely(cmd == NULL)) { | ||
697 | DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); | ||
716 | return -ENOMEM; | 698 | return -ENOMEM; |
699 | } | ||
717 | 700 | ||
718 | vmw_overlay_pause_all(dev_priv); | 701 | memset(cmd, 0, fifo_size); |
719 | ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, | 702 | cmd->header = SVGA_CMD_DEFINE_GMRFB; |
720 | &vmw_vram_ne_placement, | 703 | cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; |
721 | false, &vmw_dmabuf_bo_free); | 704 | cmd->body.format.colorDepth = depth; |
722 | vmw_overlay_resume_all(dev_priv); | 705 | cmd->body.format.reserved = 0; |
706 | cmd->body.bytesPerLine = framebuffer->base.pitch; | ||
707 | cmd->body.ptr.gmrId = framebuffer->user_handle; | ||
708 | cmd->body.ptr.offset = 0; | ||
709 | |||
710 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | ||
711 | fifo_size, 0, NULL); | ||
712 | |||
713 | kfree(cmd); | ||
714 | |||
715 | return ret; | ||
716 | } | ||
717 | |||
718 | static int do_dmabuf_dirty_sou(struct drm_file *file_priv, | ||
719 | struct vmw_private *dev_priv, | ||
720 | struct vmw_framebuffer *framebuffer, | ||
721 | unsigned flags, unsigned color, | ||
722 | struct drm_clip_rect *clips, | ||
723 | unsigned num_clips, int increment) | ||
724 | { | ||
725 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; | ||
726 | struct drm_clip_rect *clips_ptr; | ||
727 | int i, k, num_units, ret; | ||
728 | struct drm_crtc *crtc; | ||
729 | size_t fifo_size; | ||
730 | |||
731 | struct { | ||
732 | uint32_t header; | ||
733 | SVGAFifoCmdBlitGMRFBToScreen body; | ||
734 | } *blits; | ||
735 | |||
736 | ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer); | ||
723 | if (unlikely(ret != 0)) | 737 | if (unlikely(ret != 0)) |
724 | vfbs->buffer = NULL; | 738 | return ret; /* define_gmrfb prints warnings */ |
739 | |||
740 | fifo_size = sizeof(*blits) * num_clips; | ||
741 | blits = kmalloc(fifo_size, GFP_KERNEL); | ||
742 | if (unlikely(blits == NULL)) { | ||
743 | DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); | ||
744 | return -ENOMEM; | ||
745 | } | ||
746 | |||
747 | num_units = 0; | ||
748 | list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { | ||
749 | if (crtc->fb != &framebuffer->base) | ||
750 | continue; | ||
751 | units[num_units++] = vmw_crtc_to_du(crtc); | ||
752 | } | ||
753 | |||
754 | for (k = 0; k < num_units; k++) { | ||
755 | struct vmw_display_unit *unit = units[k]; | ||
756 | int hit_num = 0; | ||
757 | |||
758 | clips_ptr = clips; | ||
759 | for (i = 0; i < num_clips; i++, clips_ptr += increment) { | ||
760 | int clip_x1 = clips_ptr->x1 - unit->crtc.x; | ||
761 | int clip_y1 = clips_ptr->y1 - unit->crtc.y; | ||
762 | int clip_x2 = clips_ptr->x2 - unit->crtc.x; | ||
763 | int clip_y2 = clips_ptr->y2 - unit->crtc.y; | ||
764 | |||
765 | /* skip any crtcs that miss the clip region */ | ||
766 | if (clip_x1 >= unit->crtc.mode.hdisplay || | ||
767 | clip_y1 >= unit->crtc.mode.vdisplay || | ||
768 | clip_x2 <= 0 || clip_y2 <= 0) | ||
769 | continue; | ||
770 | |||
771 | blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; | ||
772 | blits[hit_num].body.destScreenId = unit->unit; | ||
773 | blits[hit_num].body.srcOrigin.x = clips_ptr->x1; | ||
774 | blits[hit_num].body.srcOrigin.y = clips_ptr->y1; | ||
775 | blits[hit_num].body.destRect.left = clip_x1; | ||
776 | blits[hit_num].body.destRect.top = clip_y1; | ||
777 | blits[hit_num].body.destRect.right = clip_x2; | ||
778 | blits[hit_num].body.destRect.bottom = clip_y2; | ||
779 | hit_num++; | ||
780 | } | ||
781 | |||
782 | /* no clips hit the crtc */ | ||
783 | if (hit_num == 0) | ||
784 | continue; | ||
785 | |||
786 | fifo_size = sizeof(*blits) * hit_num; | ||
787 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits, | ||
788 | fifo_size, 0, NULL); | ||
789 | |||
790 | if (unlikely(ret != 0)) | ||
791 | break; | ||
792 | } | ||
793 | |||
794 | kfree(blits); | ||
725 | 795 | ||
726 | return ret; | 796 | return ret; |
727 | } | 797 | } |
728 | 798 | ||
729 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | 799 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, |
800 | struct drm_file *file_priv, | ||
801 | unsigned flags, unsigned color, | ||
802 | struct drm_clip_rect *clips, | ||
803 | unsigned num_clips) | ||
730 | { | 804 | { |
731 | struct ttm_buffer_object *bo; | 805 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); |
732 | struct vmw_framebuffer_surface *vfbs = | 806 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
733 | vmw_framebuffer_to_vfbs(&vfb->base); | 807 | struct vmw_framebuffer_dmabuf *vfbd = |
808 | vmw_framebuffer_to_vfbd(framebuffer); | ||
809 | struct drm_clip_rect norect; | ||
810 | int ret, increment = 1; | ||
734 | 811 | ||
735 | if (unlikely(vfbs->buffer == NULL)) | 812 | ret = ttm_read_lock(&vmaster->lock, true); |
736 | return 0; | 813 | if (unlikely(ret != 0)) |
814 | return ret; | ||
815 | |||
816 | if (!num_clips) { | ||
817 | num_clips = 1; | ||
818 | clips = &norect; | ||
819 | norect.x1 = norect.y1 = 0; | ||
820 | norect.x2 = framebuffer->width; | ||
821 | norect.y2 = framebuffer->height; | ||
822 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
823 | num_clips /= 2; | ||
824 | increment = 2; | ||
825 | } | ||
737 | 826 | ||
738 | bo = &vfbs->buffer->base; | 827 | if (dev_priv->ldu_priv) { |
739 | ttm_bo_unref(&bo); | 828 | ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, |
740 | vfbs->buffer = NULL; | 829 | flags, color, |
830 | clips, num_clips, increment); | ||
831 | } else { | ||
832 | ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, | ||
833 | flags, color, | ||
834 | clips, num_clips, increment); | ||
835 | } | ||
741 | 836 | ||
742 | return 0; | 837 | ttm_read_unlock(&vmaster->lock); |
838 | return ret; | ||
743 | } | 839 | } |
744 | 840 | ||
841 | static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | ||
842 | .destroy = vmw_framebuffer_dmabuf_destroy, | ||
843 | .dirty = vmw_framebuffer_dmabuf_dirty, | ||
844 | .create_handle = vmw_framebuffer_create_handle, | ||
845 | }; | ||
846 | |||
847 | /** | ||
848 | * Pin the dma buffer to the start of VRAM. | ||
849 | */ | ||
745 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | 850 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) |
746 | { | 851 | { |
747 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | 852 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); |
@@ -749,10 +854,12 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
749 | vmw_framebuffer_to_vfbd(&vfb->base); | 854 | vmw_framebuffer_to_vfbd(&vfb->base); |
750 | int ret; | 855 | int ret; |
751 | 856 | ||
857 | /* This code should not be used with screen objects */ | ||
858 | BUG_ON(dev_priv->sou_priv); | ||
752 | 859 | ||
753 | vmw_overlay_pause_all(dev_priv); | 860 | vmw_overlay_pause_all(dev_priv); |
754 | 861 | ||
755 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); | 862 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false); |
756 | 863 | ||
757 | vmw_overlay_resume_all(dev_priv); | 864 | vmw_overlay_resume_all(dev_priv); |
758 | 865 | ||
@@ -772,7 +879,7 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
772 | return 0; | 879 | return 0; |
773 | } | 880 | } |
774 | 881 | ||
775 | return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); | 882 | return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false); |
776 | } | 883 | } |
777 | 884 | ||
778 | static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | 885 | static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, |
@@ -794,6 +901,33 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
794 | return -EINVAL; | 901 | return -EINVAL; |
795 | } | 902 | } |
796 | 903 | ||
904 | /* Limited framebuffer color depth support for screen objects */ | ||
905 | if (dev_priv->sou_priv) { | ||
906 | switch (mode_cmd->depth) { | ||
907 | case 32: | ||
908 | case 24: | ||
909 | /* Only support 32 bpp for 32 and 24 depth fbs */ | ||
910 | if (mode_cmd->bpp == 32) | ||
911 | break; | ||
912 | |||
913 | DRM_ERROR("Invalid color depth/bpp: %d %d\n", | ||
914 | mode_cmd->depth, mode_cmd->bpp); | ||
915 | return -EINVAL; | ||
916 | case 16: | ||
917 | case 15: | ||
918 | /* Only support 16 bpp for 16 and 15 depth fbs */ | ||
919 | if (mode_cmd->bpp == 16) | ||
920 | break; | ||
921 | |||
922 | DRM_ERROR("Invalid color depth/bpp: %d %d\n", | ||
923 | mode_cmd->depth, mode_cmd->bpp); | ||
924 | return -EINVAL; | ||
925 | default: | ||
926 | DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); | ||
927 | return -EINVAL; | ||
928 | } | ||
929 | } | ||
930 | |||
797 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); | 931 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); |
798 | if (!vfbd) { | 932 | if (!vfbd) { |
799 | ret = -ENOMEM; | 933 | ret = -ENOMEM; |
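The screen-object path above accepts exactly two depth/bpp pairings: depth 24 or 32 requires 32 bpp, and depth 15 or 16 requires 16 bpp; everything else is rejected with -EINVAL. A minimal standalone sketch of the same rule (the helper name is hypothetical, not part of the driver):

    #include <stdbool.h>

    /* Mirror of the screen-object depth/bpp pairing rule above. */
    static bool sou_depth_bpp_ok(unsigned depth, unsigned bpp)
    {
            switch (depth) {
            case 32:
            case 24:
                    return bpp == 32;       /* XRGB8888-style layouts */
            case 16:
            case 15:
                    return bpp == 16;       /* RGB565 / XRGB1555-style layouts */
            default:
                    return false;
            }
    }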
@@ -815,9 +949,13 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
815 | vfbd->base.base.depth = mode_cmd->depth; | 949 | vfbd->base.base.depth = mode_cmd->depth; |
816 | vfbd->base.base.width = mode_cmd->width; | 950 | vfbd->base.base.width = mode_cmd->width; |
817 | vfbd->base.base.height = mode_cmd->height; | 951 | vfbd->base.base.height = mode_cmd->height; |
818 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; | 952 | if (!dev_priv->sou_priv) { |
819 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | 953 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; |
954 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | ||
955 | } | ||
956 | vfbd->base.dmabuf = true; | ||
820 | vfbd->buffer = dmabuf; | 957 | vfbd->buffer = dmabuf; |
958 | vfbd->base.user_handle = mode_cmd->handle; | ||
821 | *out = &vfbd->base; | 959 | *out = &vfbd->base; |
822 | 960 | ||
823 | return 0; | 961 | return 0; |
@@ -843,6 +981,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
843 | struct vmw_framebuffer *vfb = NULL; | 981 | struct vmw_framebuffer *vfb = NULL; |
844 | struct vmw_surface *surface = NULL; | 982 | struct vmw_surface *surface = NULL; |
845 | struct vmw_dma_buffer *bo = NULL; | 983 | struct vmw_dma_buffer *bo = NULL; |
984 | struct ttm_base_object *user_obj; | ||
846 | u64 required_size; | 985 | u64 required_size; |
847 | int ret; | 986 | int ret; |
848 | 987 | ||
@@ -858,6 +997,21 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
858 | return NULL; | 997 | return NULL; |
859 | } | 998 | } |
860 | 999 | ||
1000 | /* | ||
1001 | * Take a reference on the user object of the resource | ||
1002 | * backing the kms fb. This ensures that user-space handle | ||
1003 | * lookups on that resource will always work as long as | ||
1004 | * it's registered with a kms framebuffer. This is important, | ||
1005 | * since vmw_execbuf_process identifies resources in the | ||
1006 | * command stream using user-space handles. | ||
1007 | */ | ||
1008 | |||
1009 | user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle); | ||
1010 | if (unlikely(user_obj == NULL)) { | ||
1011 | DRM_ERROR("Could not locate requested kms frame buffer.\n"); | ||
1012 | return ERR_PTR(-ENOENT); | ||
1013 | } | ||
1014 | |||
861 | /** | 1015 | /** |
862 | * End conditioned code. | 1016 | * End conditioned code. |
863 | */ | 1017 | */ |
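The comment above states the invariant this lookup enforces: while a kms framebuffer is registered, the user-space handle that backs it must stay resolvable, because vmw_execbuf_process identifies resources by handle. A kernel-context sketch of that reference lifecycle, using only the ttm calls visible in this diff (the wrapper names are hypothetical):

    /* Take the ref when the fb is created... */
    static int fb_take_user_ref(struct ttm_object_file *tfile, uint32_t handle,
                                struct ttm_base_object **user_obj)
    {
            *user_obj = ttm_base_object_lookup(tfile, handle);  /* +1 ref */
            if (*user_obj == NULL)
                    return -ENOENT;
            return 0;
    }

    /* ...and drop it on destroy or on any error path after the lookup. */
    static void fb_drop_user_ref(struct ttm_base_object **user_obj)
    {
            ttm_base_object_unref(user_obj);    /* handle may now disappear */
    }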
@@ -878,8 +1032,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
878 | 1032 | ||
879 | if (ret) { | 1033 | if (ret) { |
880 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | 1034 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); |
1035 | ttm_base_object_unref(&user_obj); | ||
881 | return ERR_PTR(ret); | 1036 | return ERR_PTR(ret); |
882 | } | 1037 | } else |
1038 | vfb->user_obj = user_obj; | ||
883 | return &vfb->base; | 1039 | return &vfb->base; |
884 | 1040 | ||
885 | try_dmabuf: | 1041 | try_dmabuf: |
@@ -899,8 +1055,10 @@ try_dmabuf: | |||
899 | 1055 | ||
900 | if (ret) { | 1056 | if (ret) { |
901 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | 1057 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); |
1058 | ttm_base_object_unref(&user_obj); | ||
902 | return ERR_PTR(ret); | 1059 | return ERR_PTR(ret); |
903 | } | 1060 | } else |
1061 | vfb->user_obj = user_obj; | ||
904 | 1062 | ||
905 | return &vfb->base; | 1063 | return &vfb->base; |
906 | 1064 | ||
@@ -908,6 +1066,7 @@ err_not_scanout: | |||
908 | DRM_ERROR("surface not marked as scanout\n"); | 1066 | DRM_ERROR("surface not marked as scanout\n"); |
909 | /* vmw_user_surface_lookup takes one ref */ | 1067 | /* vmw_user_surface_lookup takes one ref */ |
910 | vmw_surface_unreference(&surface); | 1068 | vmw_surface_unreference(&surface); |
1069 | ttm_base_object_unref(&user_obj); | ||
911 | 1070 | ||
912 | return ERR_PTR(-EINVAL); | 1071 | return ERR_PTR(-EINVAL); |
913 | } | 1072 | } |
@@ -916,6 +1075,210 @@ static struct drm_mode_config_funcs vmw_kms_funcs = { | |||
916 | .fb_create = vmw_kms_fb_create, | 1075 | .fb_create = vmw_kms_fb_create, |
917 | }; | 1076 | }; |
918 | 1077 | ||
1078 | int vmw_kms_present(struct vmw_private *dev_priv, | ||
1079 | struct drm_file *file_priv, | ||
1080 | struct vmw_framebuffer *vfb, | ||
1081 | struct vmw_surface *surface, | ||
1082 | uint32_t sid, | ||
1083 | int32_t destX, int32_t destY, | ||
1084 | struct drm_vmw_rect *clips, | ||
1085 | uint32_t num_clips) | ||
1086 | { | ||
1087 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; | ||
1088 | struct drm_crtc *crtc; | ||
1089 | size_t fifo_size; | ||
1090 | int i, k, num_units; | ||
1091 | int ret = 0; /* silence warning */ | ||
1092 | |||
1093 | struct { | ||
1094 | SVGA3dCmdHeader header; | ||
1095 | SVGA3dCmdBlitSurfaceToScreen body; | ||
1096 | } *cmd; | ||
1097 | SVGASignedRect *blits; | ||
1098 | |||
1099 | num_units = 0; | ||
1100 | list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { | ||
1101 | if (crtc->fb != &vfb->base) | ||
1102 | continue; | ||
1103 | units[num_units++] = vmw_crtc_to_du(crtc); | ||
1104 | } | ||
1105 | |||
1106 | BUG_ON(surface == NULL); | ||
1107 | BUG_ON(!clips || !num_clips); | ||
1108 | |||
1109 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; | ||
1110 | cmd = kmalloc(fifo_size, GFP_KERNEL); | ||
1111 | if (unlikely(cmd == NULL)) { | ||
1112 | DRM_ERROR("Failed to allocate temporary fifo memory.\n"); | ||
1113 | return -ENOMEM; | ||
1114 | } | ||
1115 | |||
1116 | /* only need to do this once */ | ||
1117 | memset(cmd, 0, fifo_size); | ||
1118 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); | ||
1119 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); | ||
1120 | |||
1121 | cmd->body.srcRect.left = 0; | ||
1122 | cmd->body.srcRect.right = surface->sizes[0].width; | ||
1123 | cmd->body.srcRect.top = 0; | ||
1124 | cmd->body.srcRect.bottom = surface->sizes[0].height; | ||
1125 | |||
1126 | blits = (SVGASignedRect *)&cmd[1]; | ||
1127 | for (i = 0; i < num_clips; i++) { | ||
1128 | blits[i].left = clips[i].x; | ||
1129 | blits[i].right = clips[i].x + clips[i].w; | ||
1130 | blits[i].top = clips[i].y; | ||
1131 | blits[i].bottom = clips[i].y + clips[i].h; | ||
1132 | } | ||
1133 | |||
1134 | for (k = 0; k < num_units; k++) { | ||
1135 | struct vmw_display_unit *unit = units[k]; | ||
1136 | int clip_x1 = destX - unit->crtc.x; | ||
1137 | int clip_y1 = destY - unit->crtc.y; | ||
1138 | int clip_x2 = clip_x1 + surface->sizes[0].width; | ||
1139 | int clip_y2 = clip_y1 + surface->sizes[0].height; | ||
1140 | |||
1141 | /* skip any crtcs that miss the clip region */ | ||
1142 | if (clip_x1 >= unit->crtc.mode.hdisplay || | ||
1143 | clip_y1 >= unit->crtc.mode.vdisplay || | ||
1144 | clip_x2 <= 0 || clip_y2 <= 0) | ||
1145 | continue; | ||
1146 | |||
1147 | /* need to reset sid as it is changed by execbuf */ | ||
1148 | cmd->body.srcImage.sid = sid; | ||
1149 | |||
1150 | cmd->body.destScreenId = unit->unit; | ||
1151 | |||
1152 | /* | ||
1153 | * The blit command is a lot more resilient than the | ||
1154 | * readback command when it comes to clip rects, so it's | ||
1155 | * okay to go out of bounds. | ||
1156 | */ | ||
1157 | |||
1158 | cmd->body.destRect.left = clip_x1; | ||
1159 | cmd->body.destRect.right = clip_x2; | ||
1160 | cmd->body.destRect.top = clip_y1; | ||
1161 | cmd->body.destRect.bottom = clip_y2; | ||
1162 | |||
1163 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | ||
1164 | fifo_size, 0, NULL); | ||
1165 | |||
1166 | if (unlikely(ret != 0)) | ||
1167 | break; | ||
1168 | } | ||
1169 | |||
1170 | kfree(cmd); | ||
1171 | |||
1172 | return ret; | ||
1173 | } | ||
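The per-unit loop in vmw_kms_present boils down to one coordinate transform: the destination is given in virtual-desktop coordinates, and each crtc sees it shifted by its own (x, y) origin, then culled against its mode. A standalone sketch with worked numbers (types and names are ours, not the driver's):

    #include <stdbool.h>

    struct rect { int x1, y1, x2, y2; };

    /* Shift a desktop-space rect into one crtc's space and apply the same
     * cull test as the loop above. Example: a 640x480 surface presented at
     * (1500, 100) on a crtc at origin (1280, 0) with a 1280x1024 mode maps
     * to (220, 100)-(860, 580), which is visible.
     */
    static bool rect_on_crtc(struct rect r, int crtc_x, int crtc_y,
                             int hdisplay, int vdisplay, struct rect *out)
    {
            out->x1 = r.x1 - crtc_x;
            out->y1 = r.y1 - crtc_y;
            out->x2 = r.x2 - crtc_x;
            out->y2 = r.y2 - crtc_y;
            return !(out->x1 >= hdisplay || out->y1 >= vdisplay ||
                     out->x2 <= 0 || out->y2 <= 0);
    }

Note that, per the comment in the loop, only the blit path may leave the transformed rect unclamped; the readback path below has to clip properly.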
1174 | |||
1175 | int vmw_kms_readback(struct vmw_private *dev_priv, | ||
1176 | struct drm_file *file_priv, | ||
1177 | struct vmw_framebuffer *vfb, | ||
1178 | struct drm_vmw_fence_rep __user *user_fence_rep, | ||
1179 | struct drm_vmw_rect *clips, | ||
1180 | uint32_t num_clips) | ||
1181 | { | ||
1182 | struct vmw_framebuffer_dmabuf *vfbd = | ||
1183 | vmw_framebuffer_to_vfbd(&vfb->base); | ||
1184 | struct vmw_dma_buffer *dmabuf = vfbd->buffer; | ||
1185 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; | ||
1186 | struct drm_crtc *crtc; | ||
1187 | size_t fifo_size; | ||
1188 | int i, k, ret, num_units, blits_pos; | ||
1189 | |||
1190 | struct { | ||
1191 | uint32_t header; | ||
1192 | SVGAFifoCmdDefineGMRFB body; | ||
1193 | } *cmd; | ||
1194 | struct { | ||
1195 | uint32_t header; | ||
1196 | SVGAFifoCmdBlitScreenToGMRFB body; | ||
1197 | } *blits; | ||
1198 | |||
1199 | num_units = 0; | ||
1200 | list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { | ||
1201 | if (crtc->fb != &vfb->base) | ||
1202 | continue; | ||
1203 | units[num_units++] = vmw_crtc_to_du(crtc); | ||
1204 | } | ||
1205 | |||
1206 | BUG_ON(dmabuf == NULL); | ||
1207 | BUG_ON(!clips || !num_clips); | ||
1208 | |||
1209 | /* take a safe guess at fifo size */ | ||
1210 | fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units; | ||
1211 | cmd = kmalloc(fifo_size, GFP_KERNEL); | ||
1212 | if (unlikely(cmd == NULL)) { | ||
1213 | DRM_ERROR("Failed to allocate temporary fifo memory.\n"); | ||
1214 | return -ENOMEM; | ||
1215 | } | ||
1216 | |||
1217 | memset(cmd, 0, fifo_size); | ||
1218 | cmd->header = SVGA_CMD_DEFINE_GMRFB; | ||
1219 | cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel; | ||
1220 | cmd->body.format.colorDepth = vfb->base.depth; | ||
1221 | cmd->body.format.reserved = 0; | ||
1222 | cmd->body.bytesPerLine = vfb->base.pitch; | ||
1223 | cmd->body.ptr.gmrId = vfb->user_handle; | ||
1224 | cmd->body.ptr.offset = 0; | ||
1225 | |||
1226 | blits = (void *)&cmd[1]; | ||
1227 | blits_pos = 0; | ||
1228 | for (i = 0; i < num_units; i++) { | ||
1229 | struct drm_vmw_rect *c = clips; | ||
1230 | for (k = 0; k < num_clips; k++, c++) { | ||
1231 | /* transform clip coords to crtc-origin-based coords */ | ||
1232 | int clip_x1 = c->x - units[i]->crtc.x; | ||
1233 | int clip_x2 = c->x - units[i]->crtc.x + c->w; | ||
1234 | int clip_y1 = c->y - units[i]->crtc.y; | ||
1235 | int clip_y2 = c->y - units[i]->crtc.y + c->h; | ||
1236 | int dest_x = c->x; | ||
1237 | int dest_y = c->y; | ||
1238 | |||
1239 | /* compensate for clipping: negate the | ||
1240 | * negative offset and add it to the destination. | ||
1241 | */ | ||
1242 | if (clip_x1 < 0) | ||
1243 | dest_x += -clip_x1; | ||
1244 | if (clip_y1 < 0) | ||
1245 | dest_y += -clip_y1; | ||
1246 | |||
1247 | /* clip */ | ||
1248 | clip_x1 = max(clip_x1, 0); | ||
1249 | clip_y1 = max(clip_y1, 0); | ||
1250 | clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay); | ||
1251 | clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay); | ||
1252 | |||
1253 | /* and cull any rects that miss the crtc */ | ||
1254 | if (clip_x1 >= units[i]->crtc.mode.hdisplay || | ||
1255 | clip_y1 >= units[i]->crtc.mode.vdisplay || | ||
1256 | clip_x2 <= 0 || clip_y2 <= 0) | ||
1257 | continue; | ||
1258 | |||
1259 | blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB; | ||
1260 | blits[blits_pos].body.srcScreenId = units[i]->unit; | ||
1261 | blits[blits_pos].body.destOrigin.x = dest_x; | ||
1262 | blits[blits_pos].body.destOrigin.y = dest_y; | ||
1263 | |||
1264 | blits[blits_pos].body.srcRect.left = clip_x1; | ||
1265 | blits[blits_pos].body.srcRect.top = clip_y1; | ||
1266 | blits[blits_pos].body.srcRect.right = clip_x2; | ||
1267 | blits[blits_pos].body.srcRect.bottom = clip_y2; | ||
1268 | blits_pos++; | ||
1269 | } | ||
1270 | } | ||
1271 | /* recompute the exact fifo size from the blits actually emitted */ | ||
1272 | fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; | ||
1273 | |||
1274 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size, | ||
1275 | 0, user_fence_rep); | ||
1276 | |||
1277 | kfree(cmd); | ||
1278 | |||
1279 | return ret; | ||
1280 | } | ||
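The clamp-and-compensate step in the readback loop is worth spelling out: when a clip rect starts left of (or above) the crtc, the source coordinate is clamped to 0 and the destination origin is advanced by the amount clipped away, so the GMRFB still receives the data at its true desktop position. A standalone sketch of one axis (names are ours):

    /* For a clip at desktop x=1250, w=100 against a crtc at origin x=1280:
     * clip_lo = -30 is clamped to 0 and dest moves from 1250 to 1280, so
     * the 30 pixels left of the crtc are simply not read back.
     */
    static void clamp_and_compensate(int *clip_lo, int *clip_hi, int limit,
                                     int *dest)
    {
            if (*clip_lo < 0) {
                    *dest += -*clip_lo;     /* shift destination by the cut */
                    *clip_lo = 0;           /* start reading at the edge */
            }
            if (*clip_hi > limit)
                    *clip_hi = limit;       /* never read past the mode */
    }

The fifo_size recomputation after the loops exists because rects culled entirely can leave blits_pos smaller than the worst-case allocation.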
1281 | |||
919 | int vmw_kms_init(struct vmw_private *dev_priv) | 1282 | int vmw_kms_init(struct vmw_private *dev_priv) |
920 | { | 1283 | { |
921 | struct drm_device *dev = dev_priv->dev; | 1284 | struct drm_device *dev = dev_priv->dev; |
@@ -929,7 +1292,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
929 | dev->mode_config.max_width = 8192; | 1292 | dev->mode_config.max_width = 8192; |
930 | dev->mode_config.max_height = 8192; | 1293 | dev->mode_config.max_height = 8192; |
931 | 1294 | ||
932 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 1295 | ret = vmw_kms_init_screen_object_display(dev_priv); |
1296 | if (ret) /* Fallback */ | ||
1297 | (void)vmw_kms_init_legacy_display_system(dev_priv); | ||
933 | 1298 | ||
934 | return 0; | 1299 | return 0; |
935 | } | 1300 | } |
@@ -987,9 +1352,9 @@ out: | |||
987 | return ret; | 1352 | return ret; |
988 | } | 1353 | } |
989 | 1354 | ||
990 | void vmw_kms_write_svga(struct vmw_private *vmw_priv, | 1355 | int vmw_kms_write_svga(struct vmw_private *vmw_priv, |
991 | unsigned width, unsigned height, unsigned pitch, | 1356 | unsigned width, unsigned height, unsigned pitch, |
992 | unsigned bbp, unsigned depth) | 1357 | unsigned bpp, unsigned depth) |
993 | { | 1358 | { |
994 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | 1359 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) |
995 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); | 1360 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); |
@@ -997,11 +1362,15 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
997 | iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); | 1362 | iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); |
998 | vmw_write(vmw_priv, SVGA_REG_WIDTH, width); | 1363 | vmw_write(vmw_priv, SVGA_REG_WIDTH, width); |
999 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); | 1364 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); |
1000 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp); | 1365 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); |
1001 | vmw_write(vmw_priv, SVGA_REG_DEPTH, depth); | 1366 | |
1002 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | 1367 | if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) { |
1003 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | 1368 | DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n", |
1004 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | 1369 | depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH)); |
1370 | return -EINVAL; | ||
1371 | } | ||
1372 | |||
1373 | return 0; | ||
1005 | } | 1374 | } |
1006 | 1375 | ||
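The reworked vmw_kms_write_svga above no longer programs SVGA_REG_DEPTH and the color masks; it writes the bpp and then reads back the depth the host derived, failing the mode set on a mismatch. A compressed sketch of that write-then-verify handshake, using the register helpers as they appear in this diff:

    /* Write the mode, then let the host veto the depth (sketch only;
     * the pitchlock handling from the real function is omitted).
     */
    static int set_mode_checked(struct vmw_private *priv, unsigned width,
                                unsigned height, unsigned bpp, unsigned depth)
    {
            vmw_write(priv, SVGA_REG_WIDTH, width);
            vmw_write(priv, SVGA_REG_HEIGHT, height);
            vmw_write(priv, SVGA_REG_BITS_PER_PIXEL, bpp);

            if (vmw_read(priv, SVGA_REG_DEPTH) != depth)
                    return -EINVAL;         /* host disagrees with the fb */
            return 0;
    }

This is also why vmw_ldu_commit_list later in this series returns the result of vmw_kms_write_svga instead of discarding it.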
1007 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) | 1376 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) |
@@ -1011,12 +1380,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
1011 | 1380 | ||
1012 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); | 1381 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); |
1013 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); | 1382 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); |
1014 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); | ||
1015 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | 1383 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); |
1016 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); | ||
1017 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); | ||
1018 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); | ||
1019 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
1020 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | 1384 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) |
1021 | vmw_priv->vga_pitchlock = | 1385 | vmw_priv->vga_pitchlock = |
1022 | vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); | 1386 | vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); |
@@ -1065,12 +1429,7 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv) | |||
1065 | 1429 | ||
1066 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); | 1430 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); |
1067 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); | 1431 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); |
1068 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); | ||
1069 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | 1432 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); |
1070 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); | ||
1071 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); | ||
1072 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); | ||
1073 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); | ||
1074 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | 1433 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) |
1075 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, | 1434 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, |
1076 | vmw_priv->vga_pitchlock); | 1435 | vmw_priv->vga_pitchlock); |
@@ -1095,60 +1454,272 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv) | |||
1095 | return 0; | 1454 | return 0; |
1096 | } | 1455 | } |
1097 | 1456 | ||
1098 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 1457 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, |
1099 | struct drm_file *file_priv) | 1458 | uint32_t pitch, |
1459 | uint32_t height) | ||
1100 | { | 1460 | { |
1101 | struct vmw_private *dev_priv = vmw_priv(dev); | 1461 | return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; |
1102 | struct drm_vmw_update_layout_arg *arg = | 1462 | } |
1103 | (struct drm_vmw_update_layout_arg *)data; | ||
1104 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1105 | void __user *user_rects; | ||
1106 | struct drm_vmw_rect *rects; | ||
1107 | unsigned rects_size; | ||
1108 | int ret; | ||
1109 | 1463 | ||
1110 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1111 | if (unlikely(ret != 0)) | ||
1112 | return ret; | ||
1113 | 1464 | ||
1114 | if (!arg->num_outputs) { | 1465 | /** |
1115 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | 1466 | * Function called by DRM code with vbl_lock held. |
1116 | vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); | 1467 | */ |
1117 | goto out_unlock; | 1468 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) |
1118 | } | 1469 | { |
1470 | return 0; | ||
1471 | } | ||
1119 | 1472 | ||
1120 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | 1473 | /** |
1121 | rects = kzalloc(rects_size, GFP_KERNEL); | 1474 | * Function called by DRM code with vbl_lock held. |
1122 | if (unlikely(!rects)) { | 1475 | */ |
1123 | ret = -ENOMEM; | 1476 | int vmw_enable_vblank(struct drm_device *dev, int crtc) |
1124 | goto out_unlock; | 1477 | { |
1478 | return -ENOSYS; | ||
1479 | } | ||
1480 | |||
1481 | /** | ||
1482 | * Function called by DRM code with vbl_lock held. | ||
1483 | */ | ||
1484 | void vmw_disable_vblank(struct drm_device *dev, int crtc) | ||
1485 | { | ||
1486 | } | ||
1487 | |||
1488 | |||
1489 | /* | ||
1490 | * Small shared kms functions. | ||
1491 | */ | ||
1492 | |||
1493 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
1494 | struct drm_vmw_rect *rects) | ||
1495 | { | ||
1496 | struct drm_device *dev = dev_priv->dev; | ||
1497 | struct vmw_display_unit *du; | ||
1498 | struct drm_connector *con; | ||
1499 | |||
1500 | mutex_lock(&dev->mode_config.mutex); | ||
1501 | |||
1502 | #if 0 | ||
1503 | { | ||
1504 | unsigned int i; | ||
1505 | |||
1506 | DRM_INFO("%s: new layout ", __func__); | ||
1507 | for (i = 0; i < num; i++) | ||
1508 | DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, | ||
1509 | rects[i].w, rects[i].h); | ||
1510 | DRM_INFO("\n"); | ||
1511 | } | ||
1512 | #endif | ||
1513 | |||
1514 | list_for_each_entry(con, &dev->mode_config.connector_list, head) { | ||
1515 | du = vmw_connector_to_du(con); | ||
1516 | if (num > du->unit) { | ||
1517 | du->pref_width = rects[du->unit].w; | ||
1518 | du->pref_height = rects[du->unit].h; | ||
1519 | du->pref_active = true; | ||
1520 | } else { | ||
1521 | du->pref_width = 800; | ||
1522 | du->pref_height = 600; | ||
1523 | du->pref_active = false; | ||
1524 | } | ||
1525 | con->status = vmw_du_connector_detect(con, true); | ||
1125 | } | 1526 | } |
1126 | 1527 | ||
1127 | user_rects = (void __user *)(unsigned long)arg->rects; | 1528 | mutex_unlock(&dev->mode_config.mutex); |
1128 | ret = copy_from_user(rects, user_rects, rects_size); | 1529 | |
1129 | if (unlikely(ret != 0)) { | 1530 | return 0; |
1130 | DRM_ERROR("Failed to get rects.\n"); | 1531 | } |
1131 | ret = -EFAULT; | 1532 | |
1132 | goto out_free; | 1533 | void vmw_du_crtc_save(struct drm_crtc *crtc) |
1534 | { | ||
1535 | } | ||
1536 | |||
1537 | void vmw_du_crtc_restore(struct drm_crtc *crtc) | ||
1538 | { | ||
1539 | } | ||
1540 | |||
1541 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | ||
1542 | u16 *r, u16 *g, u16 *b, | ||
1543 | uint32_t start, uint32_t size) | ||
1544 | { | ||
1545 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
1546 | int i; | ||
1547 | |||
1548 | for (i = 0; i < size; i++) { | ||
1549 | DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i, | ||
1550 | r[i], g[i], b[i]); | ||
1551 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8); | ||
1552 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); | ||
1553 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); | ||
1133 | } | 1554 | } |
1555 | } | ||
1134 | 1556 | ||
1135 | vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); | 1557 | void vmw_du_connector_dpms(struct drm_connector *connector, int mode) |
1558 | { | ||
1559 | } | ||
1136 | 1560 | ||
1137 | out_free: | 1561 | void vmw_du_connector_save(struct drm_connector *connector) |
1138 | kfree(rects); | 1562 | { |
1139 | out_unlock: | ||
1140 | ttm_read_unlock(&vmaster->lock); | ||
1141 | return ret; | ||
1142 | } | 1563 | } |
1143 | 1564 | ||
1144 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | 1565 | void vmw_du_connector_restore(struct drm_connector *connector) |
1145 | uint32_t pitch, | ||
1146 | uint32_t height) | ||
1147 | { | 1566 | { |
1148 | return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; | ||
1149 | } | 1567 | } |
1150 | 1568 | ||
1151 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | 1569 | enum drm_connector_status |
1570 | vmw_du_connector_detect(struct drm_connector *connector, bool force) | ||
1571 | { | ||
1572 | uint32_t num_displays; | ||
1573 | struct drm_device *dev = connector->dev; | ||
1574 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1575 | |||
1576 | mutex_lock(&dev_priv->hw_mutex); | ||
1577 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | ||
1578 | mutex_unlock(&dev_priv->hw_mutex); | ||
1579 | |||
1580 | return ((vmw_connector_to_du(connector)->unit < num_displays) ? | ||
1581 | connector_status_connected : connector_status_disconnected); | ||
1582 | } | ||
1583 | |||
1584 | static struct drm_display_mode vmw_kms_connector_builtin[] = { | ||
1585 | /* 640x480@60Hz */ | ||
1586 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
1587 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
1588 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
1589 | /* 800x600@60Hz */ | ||
1590 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, | ||
1591 | 968, 1056, 0, 600, 601, 605, 628, 0, | ||
1592 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1593 | /* 1024x768@60Hz */ | ||
1594 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | ||
1595 | 1184, 1344, 0, 768, 771, 777, 806, 0, | ||
1596 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
1597 | /* 1152x864@75Hz */ | ||
1598 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | ||
1599 | 1344, 1600, 0, 864, 865, 868, 900, 0, | ||
1600 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1601 | /* 1280x768@60Hz */ | ||
1602 | { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, | ||
1603 | 1472, 1664, 0, 768, 771, 778, 798, 0, | ||
1604 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1605 | /* 1280x800@60Hz */ | ||
1606 | { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, | ||
1607 | 1480, 1680, 0, 800, 803, 809, 831, 0, | ||
1608 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
1609 | /* 1280x960@60Hz */ | ||
1610 | { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, | ||
1611 | 1488, 1800, 0, 960, 961, 964, 1000, 0, | ||
1612 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1613 | /* 1280x1024@60Hz */ | ||
1614 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, | ||
1615 | 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, | ||
1616 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1617 | /* 1360x768@60Hz */ | ||
1618 | { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, | ||
1619 | 1536, 1792, 0, 768, 771, 777, 795, 0, | ||
1620 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1621 | /* 1400x1050@60Hz */ | ||
1622 | { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, | ||
1623 | 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, | ||
1624 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1625 | /* 1440x900@60Hz */ | ||
1626 | { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, | ||
1627 | 1672, 1904, 0, 900, 903, 909, 934, 0, | ||
1628 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1629 | /* 1600x1200@60Hz */ | ||
1630 | { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, | ||
1631 | 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, | ||
1632 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1633 | /* 1680x1050@60Hz */ | ||
1634 | { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, | ||
1635 | 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, | ||
1636 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1637 | /* 1792x1344@60Hz */ | ||
1638 | { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, | ||
1639 | 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, | ||
1640 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1641 | /* 1856x1392@60Hz */ | ||
1642 | { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, | ||
1643 | 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, | ||
1644 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1645 | /* 1920x1200@60Hz */ | ||
1646 | { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, | ||
1647 | 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, | ||
1648 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1649 | /* 1920x1440@60Hz */ | ||
1650 | { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, | ||
1651 | 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, | ||
1652 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1653 | /* 2560x1600@60Hz */ | ||
1654 | { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, | ||
1655 | 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, | ||
1656 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1657 | /* Terminate */ | ||
1658 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, | ||
1659 | }; | ||
1660 | |||
1661 | int vmw_du_connector_fill_modes(struct drm_connector *connector, | ||
1662 | uint32_t max_width, uint32_t max_height) | ||
1663 | { | ||
1664 | struct vmw_display_unit *du = vmw_connector_to_du(connector); | ||
1665 | struct drm_device *dev = connector->dev; | ||
1666 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1667 | struct drm_display_mode *mode = NULL; | ||
1668 | struct drm_display_mode *bmode; | ||
1669 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | ||
1670 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
1671 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1672 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) | ||
1673 | }; | ||
1674 | int i; | ||
1675 | |||
1676 | /* Add preferred mode */ | ||
1677 | { | ||
1678 | mode = drm_mode_duplicate(dev, &prefmode); | ||
1679 | if (!mode) | ||
1680 | return 0; | ||
1681 | mode->hdisplay = du->pref_width; | ||
1682 | mode->vdisplay = du->pref_height; | ||
1683 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1684 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, | ||
1685 | mode->vdisplay)) { | ||
1686 | drm_mode_probed_add(connector, mode); | ||
1687 | |||
1688 | if (du->pref_mode) { | ||
1689 | list_del_init(&du->pref_mode->head); | ||
1690 | drm_mode_destroy(dev, du->pref_mode); | ||
1691 | } | ||
1692 | |||
1693 | du->pref_mode = mode; | ||
1694 | } | ||
1695 | } | ||
1696 | |||
1697 | for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { | ||
1698 | bmode = &vmw_kms_connector_builtin[i]; | ||
1699 | if (bmode->hdisplay > max_width || | ||
1700 | bmode->vdisplay > max_height) | ||
1701 | continue; | ||
1702 | |||
1703 | if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, | ||
1704 | bmode->vdisplay)) | ||
1705 | continue; | ||
1706 | |||
1707 | mode = drm_mode_duplicate(dev, bmode); | ||
1708 | if (!mode) | ||
1709 | return 0; | ||
1710 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1711 | |||
1712 | drm_mode_probed_add(connector, mode); | ||
1713 | } | ||
1714 | |||
1715 | drm_mode_connector_list_update(connector); | ||
1716 | |||
1717 | return 1; | ||
1718 | } | ||
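vmw_kms_validate_mode_vram, defined earlier in this file, is just pitch * height < vram_size in 64-bit arithmetic, and fill_modes calls it with hdisplay * 2, which reads like a deliberate 2-bytes-per-pixel lower bound on the pitch rather than the real scanout pitch (our inference; the code does not say). Worked numbers for the largest builtin mode, as a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* 2560x1600 with the 2-byte floor used above:
     *   pitch  = 2560 * 2    = 5120 bytes
     *   needed = 5120 * 1600 = 8192000 bytes (~7.8 MiB)
     * so the mode is offered whenever vram_size exceeds roughly 8 MB.
     */
    static bool mode_fits_vram(uint64_t pitch, uint64_t height,
                               uint64_t vram_size)
    {
            return pitch * height < vram_size;  /* 64-bit: no overflow */
    }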
1719 | |||
1720 | int vmw_du_connector_set_property(struct drm_connector *connector, | ||
1721 | struct drm_property *property, | ||
1722 | uint64_t val) | ||
1152 | { | 1723 | { |
1153 | return 0; | 1724 | return 0; |
1154 | } | 1725 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 8a398a0339b6..db0b901f8c3f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "vmwgfx_drv.h" | 32 | #include "vmwgfx_drv.h" |
33 | 33 | ||
34 | #define VMWGFX_NUM_DISPLAY_UNITS 8 | ||
35 | |||
34 | 36 | ||
35 | #define vmw_framebuffer_to_vfb(x) \ | 37 | #define vmw_framebuffer_to_vfb(x) \ |
36 | container_of(x, struct vmw_framebuffer, base) | 38 | container_of(x, struct vmw_framebuffer, base) |
@@ -45,6 +47,9 @@ struct vmw_framebuffer { | |||
45 | struct drm_framebuffer base; | 47 | struct drm_framebuffer base; |
46 | int (*pin)(struct vmw_framebuffer *fb); | 48 | int (*pin)(struct vmw_framebuffer *fb); |
47 | int (*unpin)(struct vmw_framebuffer *fb); | 49 | int (*unpin)(struct vmw_framebuffer *fb); |
50 | bool dmabuf; | ||
51 | struct ttm_base_object *user_obj; | ||
52 | uint32_t user_handle; | ||
48 | }; | 53 | }; |
49 | 54 | ||
50 | 55 | ||
@@ -83,22 +88,59 @@ struct vmw_display_unit { | |||
83 | int hotspot_y; | 88 | int hotspot_y; |
84 | 89 | ||
85 | unsigned unit; | 90 | unsigned unit; |
91 | |||
92 | /* | ||
93 | * Preferred mode tracking. | ||
94 | */ | ||
95 | unsigned pref_width; | ||
96 | unsigned pref_height; | ||
97 | bool pref_active; | ||
98 | struct drm_display_mode *pref_mode; | ||
86 | }; | 99 | }; |
87 | 100 | ||
101 | #define vmw_crtc_to_du(x) \ | ||
102 | container_of(x, struct vmw_display_unit, crtc) | ||
103 | #define vmw_connector_to_du(x) \ | ||
104 | container_of(x, struct vmw_display_unit, connector) | ||
105 | |||
106 | |||
88 | /* | 107 | /* |
89 | * Shared display unit functions - vmwgfx_kms.c | 108 | * Shared display unit functions - vmwgfx_kms.c |
90 | */ | 109 | */ |
91 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); | 110 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); |
111 | void vmw_du_crtc_save(struct drm_crtc *crtc); | ||
112 | void vmw_du_crtc_restore(struct drm_crtc *crtc); | ||
113 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | ||
114 | u16 *r, u16 *g, u16 *b, | ||
115 | uint32_t start, uint32_t size); | ||
92 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 116 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
93 | uint32_t handle, uint32_t width, uint32_t height); | 117 | uint32_t handle, uint32_t width, uint32_t height); |
94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | 118 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); |
119 | void vmw_du_connector_dpms(struct drm_connector *connector, int mode); | ||
120 | void vmw_du_connector_save(struct drm_connector *connector); | ||
121 | void vmw_du_connector_restore(struct drm_connector *connector); | ||
122 | enum drm_connector_status | ||
123 | vmw_du_connector_detect(struct drm_connector *connector, bool force); | ||
124 | int vmw_du_connector_fill_modes(struct drm_connector *connector, | ||
125 | uint32_t max_width, uint32_t max_height); | ||
126 | int vmw_du_connector_set_property(struct drm_connector *connector, | ||
127 | struct drm_property *property, | ||
128 | uint64_t val); | ||
129 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
130 | struct drm_vmw_rect *rects); | ||
95 | 131 | ||
96 | /* | 132 | /* |
97 | * Legacy display unit functions - vmwgfx_ldu.c | 133 | * Legacy display unit functions - vmwgfx_ldu.c |
98 | */ | 134 | */ |
99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); | 135 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); |
100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); | 136 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); |
101 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | 137 | |
138 | /* | ||
139 | * Screen Objects display functions - vmwgfx_scrn.c | ||
140 | */ | ||
141 | int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv); | ||
142 | int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); | ||
143 | int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
102 | struct drm_vmw_rect *rects); | 144 | struct drm_vmw_rect *rects); |
103 | 145 | ||
104 | #endif | 146 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b3a2cd5118d7..92f56bc594eb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -27,7 +27,6 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | #define VMWGFX_LDU_NUM_DU 8 | ||
31 | 30 | ||
32 | #define vmw_crtc_to_ldu(x) \ | 31 | #define vmw_crtc_to_ldu(x) \ |
33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 32 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
@@ -51,11 +50,6 @@ struct vmw_legacy_display { | |||
51 | struct vmw_legacy_display_unit { | 50 | struct vmw_legacy_display_unit { |
52 | struct vmw_display_unit base; | 51 | struct vmw_display_unit base; |
53 | 52 | ||
54 | unsigned pref_width; | ||
55 | unsigned pref_height; | ||
56 | bool pref_active; | ||
57 | struct drm_display_mode *pref_mode; | ||
58 | |||
59 | struct list_head active; | 53 | struct list_head active; |
60 | }; | 54 | }; |
61 | 55 | ||
@@ -71,20 +65,6 @@ static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) | |||
71 | * Legacy Display Unit CRTC functions | 65 | * Legacy Display Unit CRTC functions |
72 | */ | 66 | */ |
73 | 67 | ||
74 | static void vmw_ldu_crtc_save(struct drm_crtc *crtc) | ||
75 | { | ||
76 | } | ||
77 | |||
78 | static void vmw_ldu_crtc_restore(struct drm_crtc *crtc) | ||
79 | { | ||
80 | } | ||
81 | |||
82 | static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc, | ||
83 | u16 *r, u16 *g, u16 *b, | ||
84 | uint32_t start, uint32_t size) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc) | 68 | static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc) |
89 | { | 69 | { |
90 | vmw_ldu_destroy(vmw_crtc_to_ldu(crtc)); | 70 | vmw_ldu_destroy(vmw_crtc_to_ldu(crtc)); |
@@ -114,10 +94,8 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) | |||
114 | return 0; | 94 | return 0; |
115 | fb = entry->base.crtc.fb; | 95 | fb = entry->base.crtc.fb; |
116 | 96 | ||
117 | vmw_kms_write_svga(dev_priv, w, h, fb->pitch, | 97 | return vmw_kms_write_svga(dev_priv, w, h, fb->pitch, |
118 | fb->bits_per_pixel, fb->depth); | 98 | fb->bits_per_pixel, fb->depth); |
119 | |||
120 | return 0; | ||
121 | } | 99 | } |
122 | 100 | ||
123 | if (!list_empty(&lds->active)) { | 101 | if (!list_empty(&lds->active)) { |
@@ -265,9 +243,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
265 | 243 | ||
266 | vmw_ldu_del_active(dev_priv, ldu); | 244 | vmw_ldu_del_active(dev_priv, ldu); |
267 | 245 | ||
268 | vmw_ldu_commit_list(dev_priv); | 246 | return vmw_ldu_commit_list(dev_priv); |
269 | |||
270 | return 0; | ||
271 | } | 247 | } |
272 | 248 | ||
273 | 249 | ||
@@ -292,21 +268,20 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
292 | 268 | ||
293 | vmw_ldu_add_active(dev_priv, ldu, vfb); | 269 | vmw_ldu_add_active(dev_priv, ldu, vfb); |
294 | 270 | ||
295 | vmw_ldu_commit_list(dev_priv); | 271 | return vmw_ldu_commit_list(dev_priv); |
296 | |||
297 | return 0; | ||
298 | } | 272 | } |
299 | 273 | ||
300 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { | 274 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { |
301 | .save = vmw_ldu_crtc_save, | 275 | .save = vmw_du_crtc_save, |
302 | .restore = vmw_ldu_crtc_restore, | 276 | .restore = vmw_du_crtc_restore, |
303 | .cursor_set = vmw_du_crtc_cursor_set, | 277 | .cursor_set = vmw_du_crtc_cursor_set, |
304 | .cursor_move = vmw_du_crtc_cursor_move, | 278 | .cursor_move = vmw_du_crtc_cursor_move, |
305 | .gamma_set = vmw_ldu_crtc_gamma_set, | 279 | .gamma_set = vmw_du_crtc_gamma_set, |
306 | .destroy = vmw_ldu_crtc_destroy, | 280 | .destroy = vmw_ldu_crtc_destroy, |
307 | .set_config = vmw_ldu_crtc_set_config, | 281 | .set_config = vmw_ldu_crtc_set_config, |
308 | }; | 282 | }; |
309 | 283 | ||
284 | |||
310 | /* | 285 | /* |
311 | * Legacy Display Unit encoder functions | 286 | * Legacy Display Unit encoder functions |
312 | */ | 287 | */ |
@@ -324,183 +299,18 @@ static struct drm_encoder_funcs vmw_legacy_encoder_funcs = { | |||
324 | * Legacy Display Unit connector functions | 299 | * Legacy Display Unit connector functions |
325 | */ | 300 | */ |
326 | 301 | ||
327 | static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode) | ||
328 | { | ||
329 | } | ||
330 | |||
331 | static void vmw_ldu_connector_save(struct drm_connector *connector) | ||
332 | { | ||
333 | } | ||
334 | |||
335 | static void vmw_ldu_connector_restore(struct drm_connector *connector) | ||
336 | { | ||
337 | } | ||
338 | |||
339 | static enum drm_connector_status | ||
340 | vmw_ldu_connector_detect(struct drm_connector *connector, | ||
341 | bool force) | ||
342 | { | ||
343 | if (vmw_connector_to_ldu(connector)->pref_active) | ||
344 | return connector_status_connected; | ||
345 | return connector_status_disconnected; | ||
346 | } | ||
347 | |||
348 | static const struct drm_display_mode vmw_ldu_connector_builtin[] = { | ||
349 | /* 640x480@60Hz */ | ||
350 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
351 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
352 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
353 | /* 800x600@60Hz */ | ||
354 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, | ||
355 | 968, 1056, 0, 600, 601, 605, 628, 0, | ||
356 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
357 | /* 1024x768@60Hz */ | ||
358 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | ||
359 | 1184, 1344, 0, 768, 771, 777, 806, 0, | ||
360 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
361 | /* 1152x864@75Hz */ | ||
362 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | ||
363 | 1344, 1600, 0, 864, 865, 868, 900, 0, | ||
364 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
365 | /* 1280x768@60Hz */ | ||
366 | { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, | ||
367 | 1472, 1664, 0, 768, 771, 778, 798, 0, | ||
368 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
369 | /* 1280x800@60Hz */ | ||
370 | { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, | ||
371 | 1480, 1680, 0, 800, 803, 809, 831, 0, | ||
372 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
373 | /* 1280x960@60Hz */ | ||
374 | { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, | ||
375 | 1488, 1800, 0, 960, 961, 964, 1000, 0, | ||
376 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
377 | /* 1280x1024@60Hz */ | ||
378 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, | ||
379 | 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, | ||
380 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
381 | /* 1360x768@60Hz */ | ||
382 | { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, | ||
383 | 1536, 1792, 0, 768, 771, 777, 795, 0, | ||
384 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
385 | /* 1400x1050@60Hz */ | ||
386 | { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, | ||
387 | 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, | ||
388 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
389 | /* 1440x900@60Hz */ | ||
390 | { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, | ||
391 | 1672, 1904, 0, 900, 903, 909, 934, 0, | ||
392 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
393 | /* 1600x1200@60Hz */ | ||
394 | { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, | ||
395 | 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, | ||
396 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
397 | /* 1680x1050@60Hz */ | ||
398 | { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, | ||
399 | 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, | ||
400 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
401 | /* 1792x1344@60Hz */ | ||
402 | { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, | ||
403 | 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, | ||
404 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
405 | /* 1856x1392@60Hz */ | ||
406 | { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, | ||
407 | 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, | ||
408 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
409 | /* 1920x1200@60Hz */ | ||
410 | { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, | ||
411 | 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, | ||
412 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
413 | /* 1920x1440@60Hz */ | ||
414 | { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, | ||
415 | 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, | ||
416 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
417 | /* 2560x1600@60Hz */ | ||
418 | { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, | ||
419 | 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, | ||
420 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
421 | /* Terminate */ | ||
422 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, | ||
423 | }; | ||
424 | |||
425 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | ||
426 | uint32_t max_width, uint32_t max_height) | ||
427 | { | ||
428 | struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); | ||
429 | struct drm_device *dev = connector->dev; | ||
430 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
431 | struct drm_display_mode *mode = NULL; | ||
432 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | ||
433 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
434 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
435 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) | ||
436 | }; | ||
437 | int i; | ||
438 | |||
439 | /* Add preferred mode */ | ||
440 | { | ||
441 | mode = drm_mode_duplicate(dev, &prefmode); | ||
442 | if (!mode) | ||
443 | return 0; | ||
444 | mode->hdisplay = ldu->pref_width; | ||
445 | mode->vdisplay = ldu->pref_height; | ||
446 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
447 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, | ||
448 | mode->vdisplay)) { | ||
449 | drm_mode_probed_add(connector, mode); | ||
450 | |||
451 | if (ldu->pref_mode) { | ||
452 | list_del_init(&ldu->pref_mode->head); | ||
453 | drm_mode_destroy(dev, ldu->pref_mode); | ||
454 | } | ||
455 | |||
456 | ldu->pref_mode = mode; | ||
457 | } | ||
458 | } | ||
459 | |||
460 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | ||
461 | const struct drm_display_mode *bmode; | ||
462 | |||
463 | bmode = &vmw_ldu_connector_builtin[i]; | ||
464 | if (bmode->hdisplay > max_width || | ||
465 | bmode->vdisplay > max_height) | ||
466 | continue; | ||
467 | |||
468 | if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, | ||
469 | bmode->vdisplay)) | ||
470 | continue; | ||
471 | |||
472 | mode = drm_mode_duplicate(dev, bmode); | ||
473 | if (!mode) | ||
474 | return 0; | ||
475 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
476 | |||
477 | drm_mode_probed_add(connector, mode); | ||
478 | } | ||
479 | |||
480 | drm_mode_connector_list_update(connector); | ||
481 | |||
482 | return 1; | ||
483 | } | ||
484 | |||
485 | static int vmw_ldu_connector_set_property(struct drm_connector *connector, | ||
486 | struct drm_property *property, | ||
487 | uint64_t val) | ||
488 | { | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static void vmw_ldu_connector_destroy(struct drm_connector *connector) | 302 | static void vmw_ldu_connector_destroy(struct drm_connector *connector) |
493 | { | 303 | { |
494 | vmw_ldu_destroy(vmw_connector_to_ldu(connector)); | 304 | vmw_ldu_destroy(vmw_connector_to_ldu(connector)); |
495 | } | 305 | } |
496 | 306 | ||
497 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { | 307 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { |
498 | .dpms = vmw_ldu_connector_dpms, | 308 | .dpms = vmw_du_connector_dpms, |
499 | .save = vmw_ldu_connector_save, | 309 | .save = vmw_du_connector_save, |
500 | .restore = vmw_ldu_connector_restore, | 310 | .restore = vmw_du_connector_restore, |
501 | .detect = vmw_ldu_connector_detect, | 311 | .detect = vmw_du_connector_detect, |
502 | .fill_modes = vmw_ldu_connector_fill_modes, | 312 | .fill_modes = vmw_du_connector_fill_modes, |
503 | .set_property = vmw_ldu_connector_set_property, | 313 | .set_property = vmw_du_connector_set_property, |
504 | .destroy = vmw_ldu_connector_destroy, | 314 | .destroy = vmw_ldu_connector_destroy, |
505 | }; | 315 | }; |
506 | 316 | ||
@@ -523,14 +333,14 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
523 | 333 | ||
524 | INIT_LIST_HEAD(&ldu->active); | 334 | INIT_LIST_HEAD(&ldu->active); |
525 | 335 | ||
526 | ldu->pref_active = (unit == 0); | 336 | ldu->base.pref_active = (unit == 0); |
527 | ldu->pref_width = 800; | 337 | ldu->base.pref_width = 800; |
528 | ldu->pref_height = 600; | 338 | ldu->base.pref_height = 600; |
529 | ldu->pref_mode = NULL; | 339 | ldu->base.pref_mode = NULL; |
530 | 340 | ||
531 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 341 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
532 | DRM_MODE_CONNECTOR_LVDS); | 342 | DRM_MODE_CONNECTOR_LVDS); |
533 | connector->status = vmw_ldu_connector_detect(connector, true); | 343 | connector->status = vmw_du_connector_detect(connector, true); |
534 | 344 | ||
535 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 345 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
536 | DRM_MODE_ENCODER_LVDS); | 346 | DRM_MODE_ENCODER_LVDS); |
@@ -540,6 +350,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
540 | 350 | ||
541 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); | 351 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); |
542 | 352 | ||
353 | drm_mode_crtc_set_gamma_size(crtc, 256); | ||
354 | |||
543 | drm_connector_attach_property(connector, | 355 | drm_connector_attach_property(connector, |
544 | dev->mode_config.dirty_info_property, | 356 | dev->mode_config.dirty_info_property, |
545 | 1); | 357 | 1); |
@@ -550,8 +362,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
550 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | 362 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
551 | { | 363 | { |
552 | struct drm_device *dev = dev_priv->dev; | 364 | struct drm_device *dev = dev_priv->dev; |
553 | int i; | 365 | int i, ret; |
554 | int ret; | ||
555 | 366 | ||
556 | if (dev_priv->ldu_priv) { | 367 | if (dev_priv->ldu_priv) { |
557 | DRM_INFO("ldu system already on\n"); | 368 | DRM_INFO("ldu system already on\n"); |
@@ -559,7 +370,6 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
559 | } | 370 | } |
560 | 371 | ||
561 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); | 372 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); |
562 | |||
563 | if (!dev_priv->ldu_priv) | 373 | if (!dev_priv->ldu_priv) |
564 | return -ENOMEM; | 374 | return -ENOMEM; |
565 | 375 | ||
@@ -568,18 +378,31 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
568 | dev_priv->ldu_priv->last_num_active = 0; | 378 | dev_priv->ldu_priv->last_num_active = 0; |
569 | dev_priv->ldu_priv->fb = NULL; | 379 | dev_priv->ldu_priv->fb = NULL; |
570 | 380 | ||
571 | drm_mode_create_dirty_info_property(dev_priv->dev); | 381 | /* for old hardware without multimon, only enable one display */ |
382 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) | ||
383 | ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); | ||
384 | else | ||
385 | ret = drm_vblank_init(dev, 1); | ||
386 | if (ret != 0) | ||
387 | goto err_free; | ||
388 | |||
389 | ret = drm_mode_create_dirty_info_property(dev); | ||
390 | if (ret != 0) | ||
391 | goto err_vblank_cleanup; | ||
572 | 392 | ||
573 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 393 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) |
574 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) | 394 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) |
575 | vmw_ldu_init(dev_priv, i); | 395 | vmw_ldu_init(dev_priv, i); |
576 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); | 396 | else |
577 | } else { | ||
578 | /* for old hardware without multimon only enable one display */ | ||
579 | vmw_ldu_init(dev_priv, 0); | 397 | vmw_ldu_init(dev_priv, 0); |
580 | ret = drm_vblank_init(dev, 1); | ||
581 | } | ||
582 | 398 | ||
399 | return 0; | ||
400 | |||
401 | err_vblank_cleanup: | ||
402 | drm_vblank_cleanup(dev); | ||
403 | err_free: | ||
404 | kfree(dev_priv->ldu_priv); | ||
405 | dev_priv->ldu_priv = NULL; | ||
583 | return ret; | 406 | return ret; |
584 | } | 407 | } |
585 | 408 | ||
@@ -587,52 +410,14 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | |||
587 | { | 410 | { |
588 | struct drm_device *dev = dev_priv->dev; | 411 | struct drm_device *dev = dev_priv->dev; |
589 | 412 | ||
590 | drm_vblank_cleanup(dev); | ||
591 | if (!dev_priv->ldu_priv) | 413 | if (!dev_priv->ldu_priv) |
592 | return -ENOSYS; | 414 | return -ENOSYS; |
593 | 415 | ||
416 | drm_vblank_cleanup(dev); | ||
417 | |||
594 | BUG_ON(!list_empty(&dev_priv->ldu_priv->active)); | 418 | BUG_ON(!list_empty(&dev_priv->ldu_priv->active)); |
595 | 419 | ||
596 | kfree(dev_priv->ldu_priv); | 420 | kfree(dev_priv->ldu_priv); |
597 | 421 | ||
598 | return 0; | 422 | return 0; |
599 | } | 423 | } |
600 | |||
601 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
602 | struct drm_vmw_rect *rects) | ||
603 | { | ||
604 | struct drm_device *dev = dev_priv->dev; | ||
605 | struct vmw_legacy_display_unit *ldu; | ||
606 | struct drm_connector *con; | ||
607 | int i; | ||
608 | |||
609 | mutex_lock(&dev->mode_config.mutex); | ||
610 | |||
611 | #if 0 | ||
612 | DRM_INFO("%s: new layout ", __func__); | ||
613 | for (i = 0; i < (int)num; i++) | ||
614 | DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, | ||
615 | rects[i].w, rects[i].h); | ||
616 | DRM_INFO("\n"); | ||
617 | #else | ||
618 | (void)i; | ||
619 | #endif | ||
620 | |||
621 | list_for_each_entry(con, &dev->mode_config.connector_list, head) { | ||
622 | ldu = vmw_connector_to_ldu(con); | ||
623 | if (num > ldu->base.unit) { | ||
624 | ldu->pref_width = rects[ldu->base.unit].w; | ||
625 | ldu->pref_height = rects[ldu->base.unit].h; | ||
626 | ldu->pref_active = true; | ||
627 | } else { | ||
628 | ldu->pref_width = 800; | ||
629 | ldu->pref_height = 600; | ||
630 | ldu->pref_active = false; | ||
631 | } | ||
632 | con->status = vmw_ldu_connector_detect(con, true); | ||
633 | } | ||
634 | |||
635 | mutex_unlock(&dev->mode_config.mutex); | ||
636 | |||
637 | return 0; | ||
638 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c new file mode 100644 index 000000000000..8a8725c2716c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c | |||
@@ -0,0 +1,171 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | |||
29 | #include "vmwgfx_drv.h" | ||
30 | |||
31 | struct vmw_marker { | ||
32 | struct list_head head; | ||
33 | uint32_t seqno; | ||
34 | struct timespec submitted; | ||
35 | }; | ||
36 | |||
37 | void vmw_marker_queue_init(struct vmw_marker_queue *queue) | ||
38 | { | ||
39 | INIT_LIST_HEAD(&queue->head); | ||
40 | queue->lag = ns_to_timespec(0); | ||
41 | getrawmonotonic(&queue->lag_time); | ||
42 | spin_lock_init(&queue->lock); | ||
43 | } | ||
44 | |||
45 | void vmw_marker_queue_takedown(struct vmw_marker_queue *queue) | ||
46 | { | ||
47 | struct vmw_marker *marker, *next; | ||
48 | |||
49 | spin_lock(&queue->lock); | ||
50 | list_for_each_entry_safe(marker, next, &queue->head, head) { | ||
51 | kfree(marker); | ||
52 | } | ||
53 | spin_unlock(&queue->lock); | ||
54 | } | ||
55 | |||
56 | int vmw_marker_push(struct vmw_marker_queue *queue, | ||
57 | uint32_t seqno) | ||
58 | { | ||
59 | struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL); | ||
60 | |||
61 | if (unlikely(!marker)) | ||
62 | return -ENOMEM; | ||
63 | |||
64 | marker->seqno = seqno; | ||
65 | getrawmonotonic(&marker->submitted); | ||
66 | spin_lock(&queue->lock); | ||
67 | list_add_tail(&marker->head, &queue->head); | ||
68 | spin_unlock(&queue->lock); | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | int vmw_marker_pull(struct vmw_marker_queue *queue, | ||
74 | uint32_t signaled_seqno) | ||
75 | { | ||
76 | struct vmw_marker *marker, *next; | ||
77 | struct timespec now; | ||
78 | bool updated = false; | ||
79 | |||
80 | spin_lock(&queue->lock); | ||
81 | getrawmonotonic(&now); | ||
82 | |||
83 | if (list_empty(&queue->head)) { | ||
84 | queue->lag = ns_to_timespec(0); | ||
85 | queue->lag_time = now; | ||
86 | updated = true; | ||
87 | goto out_unlock; | ||
88 | } | ||
89 | |||
90 | list_for_each_entry_safe(marker, next, &queue->head, head) { | ||
91 | if (signaled_seqno - marker->seqno > (1 << 30)) | ||
92 | continue; | ||
93 | |||
94 | queue->lag = timespec_sub(now, marker->submitted); | ||
95 | queue->lag_time = now; | ||
96 | updated = true; | ||
97 | list_del(&marker->head); | ||
98 | kfree(marker); | ||
99 | } | ||
100 | |||
101 | out_unlock: | ||
102 | spin_unlock(&queue->lock); | ||
103 | |||
104 | return (updated) ? 0 : -EBUSY; | ||
105 | } | ||
106 | |||
107 | static struct timespec vmw_timespec_add(struct timespec t1, | ||
108 | struct timespec t2) | ||
109 | { | ||
110 | t1.tv_sec += t2.tv_sec; | ||
111 | t1.tv_nsec += t2.tv_nsec; | ||
112 | if (t1.tv_nsec >= 1000000000L) { | ||
113 | t1.tv_sec += 1; | ||
114 | t1.tv_nsec -= 1000000000L; | ||
115 | } | ||
116 | |||
117 | return t1; | ||
118 | } | ||
119 | |||
120 | static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue) | ||
121 | { | ||
122 | struct timespec now; | ||
123 | |||
124 | spin_lock(&queue->lock); | ||
125 | getrawmonotonic(&now); | ||
126 | queue->lag = vmw_timespec_add(queue->lag, | ||
127 | timespec_sub(now, queue->lag_time)); | ||
128 | queue->lag_time = now; | ||
129 | spin_unlock(&queue->lock); | ||
130 | return queue->lag; | ||
131 | } | ||
132 | |||
133 | |||
134 | static bool vmw_lag_lt(struct vmw_marker_queue *queue, | ||
135 | uint32_t us) | ||
136 | { | ||
137 | struct timespec lag, cond; | ||
138 | |||
139 | cond = ns_to_timespec((s64) us * 1000); | ||
140 | lag = vmw_fifo_lag(queue); | ||
141 | return (timespec_compare(&lag, &cond) < 1); | ||
142 | } | ||
143 | |||
144 | int vmw_wait_lag(struct vmw_private *dev_priv, | ||
145 | struct vmw_marker_queue *queue, uint32_t us) | ||
146 | { | ||
147 | struct vmw_marker *marker; | ||
148 | uint32_t seqno; | ||
149 | int ret; | ||
150 | |||
151 | while (!vmw_lag_lt(queue, us)) { | ||
152 | spin_lock(&queue->lock); | ||
153 | if (list_empty(&queue->head)) | ||
154 | seqno = atomic_read(&dev_priv->marker_seq); | ||
155 | else { | ||
156 | marker = list_first_entry(&queue->head, | ||
157 | struct vmw_marker, head); | ||
158 | seqno = marker->seqno; | ||
159 | } | ||
160 | spin_unlock(&queue->lock); | ||
161 | |||
162 | ret = vmw_wait_seqno(dev_priv, false, seqno, true, | ||
163 | 3*HZ); | ||
164 | |||
165 | if (unlikely(ret != 0)) | ||
166 | return ret; | ||
167 | |||
168 | (void) vmw_marker_pull(queue, seqno); | ||
169 | } | ||
170 | return 0; | ||
171 | } | ||
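The marker queue added here measures how far the device lags behind command submission: vmw_marker_push() timestamps a seqno as it enters the FIFO, vmw_marker_pull() prunes markers once that seqno has signaled and refreshes queue->lag, and vmw_wait_lag() throttles the submitter until the lag drops below a bound. The real call sites live elsewhere in the driver; a minimal usage sketch under that assumption:

	struct vmw_marker_queue *queue;	/* wherever the driver keeps it */
	uint32_t seqno;
	int ret;

	/* submission side: tag the commands just written to the FIFO */
	seqno = atomic_add_return(1, &dev_priv->marker_seq);
	(void) vmw_marker_push(queue, seqno);

	/* completion side: prune markers once seqno has signaled */
	(void) vmw_marker_pull(queue, seqno);

	/* throttling: block until the device is less than 100 ms behind */
	ret = vmw_wait_lag(dev_priv, queue, 100000 /* us */);

The (signaled_seqno - marker->seqno > (1 << 30)) test in vmw_marker_pull() relies on unsigned wraparound: a marker whose seqno is far "ahead" of the signaled value is treated as not yet passed, which keeps the pruning correct when the 32-bit seqno counter wraps.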
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 07ce02da78a4..14399eec9c3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -87,48 +87,6 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, | |||
87 | } | 87 | } |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * Pin or unpin a buffer in vram. | ||
91 | * | ||
92 | * @dev_priv: Driver private. | ||
93 | * @buf: DMA buffer to pin or unpin. | ||
94 | * @pin: Pin buffer in vram if true. | ||
95 | * @interruptible: Use interruptible wait. | ||
96 | * | ||
97 | * Takes the current masters ttm lock in read. | ||
98 | * | ||
99 | * Returns | ||
100 | * -ERESTARTSYS if interrupted by a signal. | ||
101 | */ | ||
102 | static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | ||
103 | struct vmw_dma_buffer *buf, | ||
104 | bool pin, bool interruptible) | ||
105 | { | ||
106 | struct ttm_buffer_object *bo = &buf->base; | ||
107 | struct ttm_placement *overlay_placement = &vmw_vram_placement; | ||
108 | int ret; | ||
109 | |||
110 | ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible); | ||
111 | if (unlikely(ret != 0)) | ||
112 | return ret; | ||
113 | |||
114 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
115 | if (unlikely(ret != 0)) | ||
116 | goto err; | ||
117 | |||
118 | if (pin) | ||
119 | overlay_placement = &vmw_vram_ne_placement; | ||
120 | |||
121 | ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false); | ||
122 | |||
123 | ttm_bo_unreserve(bo); | ||
124 | |||
125 | err: | ||
126 | ttm_read_unlock(&dev_priv->active_master->lock); | ||
127 | |||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * Send put command to hw. | 90 | * Send put command to hw. |
133 | * | 91 | * |
134 | * Returns | 92 | * Returns |
@@ -139,68 +97,80 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, | |||
139 | struct drm_vmw_control_stream_arg *arg, | 97 | struct drm_vmw_control_stream_arg *arg, |
140 | bool interruptible) | 98 | bool interruptible) |
141 | { | 99 | { |
100 | struct vmw_escape_video_flush *flush; | ||
101 | size_t fifo_size; | ||
102 | bool have_so = dev_priv->sou_priv ? true : false; | ||
103 | int i, num_items; | ||
104 | SVGAGuestPtr ptr; | ||
105 | |||
142 | struct { | 106 | struct { |
143 | struct vmw_escape_header escape; | 107 | struct vmw_escape_header escape; |
144 | struct { | 108 | struct { |
145 | struct { | 109 | uint32_t cmdType; |
146 | uint32_t cmdType; | 110 | uint32_t streamId; |
147 | uint32_t streamId; | 111 | } header; |
148 | } header; | ||
149 | struct { | ||
150 | uint32_t registerId; | ||
151 | uint32_t value; | ||
152 | } items[SVGA_VIDEO_PITCH_3 + 1]; | ||
153 | } body; | ||
154 | struct vmw_escape_video_flush flush; | ||
155 | } *cmds; | 112 | } *cmds; |
156 | uint32_t offset; | 113 | struct { |
157 | int i, ret; | 114 | uint32_t registerId; |
115 | uint32_t value; | ||
116 | } *items; | ||
158 | 117 | ||
159 | for (;;) { | 118 | /* the defines are register indices, so the count needed is the highest index + 1 */ |
160 | cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); | 119 | if (have_so) |
161 | if (cmds) | 120 | num_items = SVGA_VIDEO_DST_SCREEN_ID + 1; |
162 | break; | 121 | else |
122 | num_items = SVGA_VIDEO_PITCH_3 + 1; | ||
163 | 123 | ||
164 | ret = vmw_fallback_wait(dev_priv, false, true, 0, | 124 | fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items; |
165 | interruptible, 3*HZ); | 125 | |
166 | if (interruptible && ret == -ERESTARTSYS) | 126 | cmds = vmw_fifo_reserve(dev_priv, fifo_size); |
167 | return ret; | 127 | /* hardware has hung, can't do anything here */ |
168 | else | 128 | if (!cmds) |
169 | BUG_ON(ret != 0); | 129 | return -ENOMEM; |
130 | |||
131 | items = (typeof(items))&cmds[1]; | ||
132 | flush = (struct vmw_escape_video_flush *)&items[num_items]; | ||
133 | |||
134 | /* the escape size is the SET_REGS header (one item's worth) plus the num_items items */ | ||
135 | fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1)); | ||
136 | |||
137 | cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; | ||
138 | cmds->header.streamId = arg->stream_id; | ||
139 | |||
140 | /* the IDs are neatly numbered */ | ||
141 | for (i = 0; i < num_items; i++) | ||
142 | items[i].registerId = i; | ||
143 | |||
144 | vmw_bo_get_guest_ptr(&buf->base, &ptr); | ||
145 | ptr.offset += arg->offset; | ||
146 | |||
147 | items[SVGA_VIDEO_ENABLED].value = true; | ||
148 | items[SVGA_VIDEO_FLAGS].value = arg->flags; | ||
149 | items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset; | ||
150 | items[SVGA_VIDEO_FORMAT].value = arg->format; | ||
151 | items[SVGA_VIDEO_COLORKEY].value = arg->color_key; | ||
152 | items[SVGA_VIDEO_SIZE].value = arg->size; | ||
153 | items[SVGA_VIDEO_WIDTH].value = arg->width; | ||
154 | items[SVGA_VIDEO_HEIGHT].value = arg->height; | ||
155 | items[SVGA_VIDEO_SRC_X].value = arg->src.x; | ||
156 | items[SVGA_VIDEO_SRC_Y].value = arg->src.y; | ||
157 | items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; | ||
158 | items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; | ||
159 | items[SVGA_VIDEO_DST_X].value = arg->dst.x; | ||
160 | items[SVGA_VIDEO_DST_Y].value = arg->dst.y; | ||
161 | items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; | ||
162 | items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; | ||
163 | items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; | ||
164 | items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; | ||
165 | items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; | ||
166 | if (have_so) { | ||
167 | items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId; | ||
168 | items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID; | ||
170 | } | 169 | } |
171 | 170 | ||
172 | fill_escape(&cmds->escape, sizeof(cmds->body)); | 171 | fill_flush(flush, arg->stream_id); |
173 | cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; | ||
174 | cmds->body.header.streamId = arg->stream_id; | ||
175 | |||
176 | for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++) | ||
177 | cmds->body.items[i].registerId = i; | ||
178 | |||
179 | offset = buf->base.offset + arg->offset; | ||
180 | |||
181 | cmds->body.items[SVGA_VIDEO_ENABLED].value = true; | ||
182 | cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags; | ||
183 | cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset; | ||
184 | cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format; | ||
185 | cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key; | ||
186 | cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size; | ||
187 | cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width; | ||
188 | cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height; | ||
189 | cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x; | ||
190 | cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y; | ||
191 | cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; | ||
192 | cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; | ||
193 | cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x; | ||
194 | cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y; | ||
195 | cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; | ||
196 | cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; | ||
197 | cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; | ||
198 | cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; | ||
199 | cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; | ||
200 | |||
201 | fill_flush(&cmds->flush, arg->stream_id); | ||
202 | 172 | ||
203 | vmw_fifo_commit(dev_priv, sizeof(*cmds)); | 173 | vmw_fifo_commit(dev_priv, fifo_size); |
204 | 174 | ||
205 | return 0; | 175 | return 0; |
206 | } | 176 | } |
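The rewritten vmw_overlay_send_put() reserves one variable-sized FIFO block and carves it into three consecutive regions, so the register-item count can differ between legacy hardware (up to SVGA_VIDEO_PITCH_3) and screen-object hardware (up to SVGA_VIDEO_DST_SCREEN_ID). The carving, restated schematically from the code above:

	/*
	 *  +----------------------+-------------------------+---------------+
	 *  | cmds: escape header  | items[num_items]:       | flush: escape |
	 *  |  + SET_REGS header   |  {registerId, value}    |  video flush  |
	 *  +----------------------+-------------------------+---------------+
	 */
	fifo_size = sizeof(*cmds) + sizeof(*items) * num_items + sizeof(*flush);
	items = (typeof(items))&cmds[1];	/* directly after the headers */
	flush = (struct vmw_escape_video_flush *)&items[num_items];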
@@ -248,6 +218,25 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, | |||
248 | } | 218 | } |
249 | 219 | ||
250 | /** | 220 | /** |
221 | * Move a buffer to vram or gmr if @pin is set, else unpin the buffer. | ||
222 | * | ||
223 | * With the introduction of screen objects buffers could now be | ||
224 | * used with GMRs instead of being locked to vram. | ||
225 | */ | ||
226 | static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, | ||
227 | struct vmw_dma_buffer *buf, | ||
228 | bool pin, bool inter) | ||
229 | { | ||
230 | if (!pin) | ||
231 | return vmw_dmabuf_unpin(dev_priv, buf, inter); | ||
232 | |||
233 | if (!dev_priv->sou_priv) | ||
234 | return vmw_dmabuf_to_vram(dev_priv, buf, true, inter); | ||
235 | |||
236 | return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter); | ||
237 | } | ||
238 | |||
239 | /** | ||
251 | * Stop or pause a stream. | 240 | * Stop or pause a stream. |
252 | * | 241 | * |
253 | * If the stream is paused, the NO_EVICT flag is removed from the buffer | 242 | * If the stream is paused, the NO_EVICT flag is removed from the buffer |
@@ -279,8 +268,8 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, | |||
279 | return ret; | 268 | return ret; |
280 | 269 | ||
281 | /* We just remove the NO_EVICT flag so no -ENOMEM */ | 270 | /* We just remove the NO_EVICT flag so no -ENOMEM */ |
282 | ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false, | 271 | ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false, |
283 | interruptible); | 272 | interruptible); |
284 | if (interruptible && ret == -ERESTARTSYS) | 273 | if (interruptible && ret == -ERESTARTSYS) |
285 | return ret; | 274 | return ret; |
286 | else | 275 | else |
@@ -342,7 +331,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | |||
342 | /* We don't start the old stream if we are interrupted. | 331 | /* We don't start the old stream if we are interrupted. |
343 | * Might return -ENOMEM if it can't fit the buffer in vram. | 332 | * Might return -ENOMEM if it can't fit the buffer in vram. |
344 | */ | 333 | */ |
345 | ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible); | 334 | ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible); |
346 | if (ret) | 335 | if (ret) |
347 | return ret; | 336 | return ret; |
348 | 337 | ||
@@ -351,7 +340,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | |||
351 | /* This one needs to happen no matter what. We only remove | 340 | /* This one needs to happen no matter what. We only remove |
352 | * the NO_EVICT flag so this is safe from -ENOMEM. | 341 | * the NO_EVICT flag so this is safe from -ENOMEM. |
353 | */ | 342 | */ |
354 | BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0); | 343 | BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false) |
344 | != 0); | ||
355 | return ret; | 345 | return ret; |
356 | } | 346 | } |
357 | 347 | ||
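vmw_overlay_move_buffer() now concentrates the buffer-placement policy in one helper. The decision it encodes, restated as a standalone sketch (the enum and function here are illustrative, not driver API):

	enum overlay_placement {
		OVERLAY_UNPINNED,	/* stream stopped/paused: evictable */
		OVERLAY_VRAM,		/* legacy display units scan out of vram */
		OVERLAY_VRAM_OR_GMR	/* screen objects may read guest memory */
	};

	static enum overlay_placement pick_placement(bool pin, bool have_sou)
	{
		if (!pin)
			return OVERLAY_UNPINNED;
		return have_sou ? OVERLAY_VRAM_OR_GMR : OVERLAY_VRAM;
	}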
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index bfe1bcce7f8a..86c5e4cceb31 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -31,10 +31,6 @@ | |||
31 | #include "ttm/ttm_placement.h" | 31 | #include "ttm/ttm_placement.h" |
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | 33 | ||
34 | #define VMW_RES_CONTEXT ttm_driver_type0 | ||
35 | #define VMW_RES_SURFACE ttm_driver_type1 | ||
36 | #define VMW_RES_STREAM ttm_driver_type2 | ||
37 | |||
38 | struct vmw_user_context { | 34 | struct vmw_user_context { |
39 | struct ttm_base_object base; | 35 | struct ttm_base_object base; |
40 | struct vmw_resource res; | 36 | struct vmw_resource res; |
@@ -43,6 +39,7 @@ struct vmw_user_context { | |||
43 | struct vmw_user_surface { | 39 | struct vmw_user_surface { |
44 | struct ttm_base_object base; | 40 | struct ttm_base_object base; |
45 | struct vmw_surface srf; | 41 | struct vmw_surface srf; |
42 | uint32_t size; | ||
46 | }; | 43 | }; |
47 | 44 | ||
48 | struct vmw_user_dma_buffer { | 45 | struct vmw_user_dma_buffer { |
@@ -65,6 +62,17 @@ struct vmw_user_stream { | |||
65 | struct vmw_stream stream; | 62 | struct vmw_stream stream; |
66 | }; | 63 | }; |
67 | 64 | ||
65 | struct vmw_surface_offset { | ||
66 | uint32_t face; | ||
67 | uint32_t mip; | ||
68 | uint32_t bo_offset; | ||
69 | }; | ||
70 | |||
71 | |||
72 | static uint64_t vmw_user_context_size; | ||
73 | static uint64_t vmw_user_surface_size; | ||
74 | static uint64_t vmw_user_stream_size; | ||
75 | |||
68 | static inline struct vmw_dma_buffer * | 76 | static inline struct vmw_dma_buffer * |
69 | vmw_dma_buffer(struct ttm_buffer_object *bo) | 77 | vmw_dma_buffer(struct ttm_buffer_object *bo) |
70 | { | 78 | { |
@@ -84,13 +92,36 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | |||
84 | return res; | 92 | return res; |
85 | } | 93 | } |
86 | 94 | ||
95 | |||
96 | /** | ||
97 | * vmw_resource_release_id - release a resource id to the id manager. | ||
98 | * | ||
99 | * @res: Pointer to the resource. | ||
100 | * | ||
101 | * Release the resource id to the resource id manager and set it to -1 | ||
102 | */ | ||
103 | static void vmw_resource_release_id(struct vmw_resource *res) | ||
104 | { | ||
105 | struct vmw_private *dev_priv = res->dev_priv; | ||
106 | |||
107 | write_lock(&dev_priv->resource_lock); | ||
108 | if (res->id != -1) | ||
109 | idr_remove(res->idr, res->id); | ||
110 | res->id = -1; | ||
111 | write_unlock(&dev_priv->resource_lock); | ||
112 | } | ||
113 | |||
87 | static void vmw_resource_release(struct kref *kref) | 114 | static void vmw_resource_release(struct kref *kref) |
88 | { | 115 | { |
89 | struct vmw_resource *res = | 116 | struct vmw_resource *res = |
90 | container_of(kref, struct vmw_resource, kref); | 117 | container_of(kref, struct vmw_resource, kref); |
91 | struct vmw_private *dev_priv = res->dev_priv; | 118 | struct vmw_private *dev_priv = res->dev_priv; |
119 | int id = res->id; | ||
120 | struct idr *idr = res->idr; | ||
92 | 121 | ||
93 | idr_remove(res->idr, res->id); | 122 | res->avail = false; |
123 | if (res->remove_from_lists != NULL) | ||
124 | res->remove_from_lists(res); | ||
94 | write_unlock(&dev_priv->resource_lock); | 125 | write_unlock(&dev_priv->resource_lock); |
95 | 126 | ||
96 | if (likely(res->hw_destroy != NULL)) | 127 | if (likely(res->hw_destroy != NULL)) |
@@ -102,6 +133,9 @@ static void vmw_resource_release(struct kref *kref) | |||
102 | kfree(res); | 133 | kfree(res); |
103 | 134 | ||
104 | write_lock(&dev_priv->resource_lock); | 135 | write_lock(&dev_priv->resource_lock); |
136 | |||
137 | if (id != -1) | ||
138 | idr_remove(idr, id); | ||
105 | } | 139 | } |
106 | 140 | ||
107 | void vmw_resource_unreference(struct vmw_resource **p_res) | 141 | void vmw_resource_unreference(struct vmw_resource **p_res) |
@@ -115,28 +149,29 @@ void vmw_resource_unreference(struct vmw_resource **p_res) | |||
115 | write_unlock(&dev_priv->resource_lock); | 149 | write_unlock(&dev_priv->resource_lock); |
116 | } | 150 | } |
117 | 151 | ||
118 | static int vmw_resource_init(struct vmw_private *dev_priv, | 152 | |
119 | struct vmw_resource *res, | 153 | /** |
120 | struct idr *idr, | 154 | * vmw_resource_alloc_id - allocate a resource id from the id manager. |
121 | enum ttm_object_type obj_type, | 155 | * |
122 | void (*res_free) (struct vmw_resource *res)) | 156 | * @dev_priv: Pointer to the device private structure. |
157 | * @res: Pointer to the resource. | ||
158 | * | ||
159 | * Allocate the lowest free resource from the resource manager, and set | ||
160 | * @res->id to that id. Returns 0 on success and -ENOMEM on failure. | ||
161 | */ | ||
162 | static int vmw_resource_alloc_id(struct vmw_private *dev_priv, | ||
163 | struct vmw_resource *res) | ||
123 | { | 164 | { |
124 | int ret; | 165 | int ret; |
125 | 166 | ||
126 | kref_init(&res->kref); | 167 | BUG_ON(res->id != -1); |
127 | res->hw_destroy = NULL; | ||
128 | res->res_free = res_free; | ||
129 | res->res_type = obj_type; | ||
130 | res->idr = idr; | ||
131 | res->avail = false; | ||
132 | res->dev_priv = dev_priv; | ||
133 | 168 | ||
134 | do { | 169 | do { |
135 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) | 170 | if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) |
136 | return -ENOMEM; | 171 | return -ENOMEM; |
137 | 172 | ||
138 | write_lock(&dev_priv->resource_lock); | 173 | write_lock(&dev_priv->resource_lock); |
139 | ret = idr_get_new_above(idr, res, 1, &res->id); | 174 | ret = idr_get_new_above(res->idr, res, 1, &res->id); |
140 | write_unlock(&dev_priv->resource_lock); | 175 | write_unlock(&dev_priv->resource_lock); |
141 | 176 | ||
142 | } while (ret == -EAGAIN); | 177 | } while (ret == -EAGAIN); |
@@ -144,6 +179,33 @@ static int vmw_resource_init(struct vmw_private *dev_priv, | |||
144 | return ret; | 179 | return ret; |
145 | } | 180 | } |
146 | 181 | ||
182 | |||
183 | static int vmw_resource_init(struct vmw_private *dev_priv, | ||
184 | struct vmw_resource *res, | ||
185 | struct idr *idr, | ||
186 | enum ttm_object_type obj_type, | ||
187 | bool delay_id, | ||
188 | void (*res_free) (struct vmw_resource *res), | ||
189 | void (*remove_from_lists) | ||
190 | (struct vmw_resource *res)) | ||
191 | { | ||
192 | kref_init(&res->kref); | ||
193 | res->hw_destroy = NULL; | ||
194 | res->res_free = res_free; | ||
195 | res->remove_from_lists = remove_from_lists; | ||
196 | res->res_type = obj_type; | ||
197 | res->idr = idr; | ||
198 | res->avail = false; | ||
199 | res->dev_priv = dev_priv; | ||
200 | INIT_LIST_HEAD(&res->query_head); | ||
201 | INIT_LIST_HEAD(&res->validate_head); | ||
202 | res->id = -1; | ||
203 | if (delay_id) | ||
204 | return 0; | ||
205 | else | ||
206 | return vmw_resource_alloc_id(dev_priv, res); | ||
207 | } | ||
208 | |||
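vmw_resource_alloc_id() uses the two-step idr pattern of this kernel generation: preallocate with idr_pre_get() outside the lock (it may sleep), then allocate with idr_get_new_above() under the lock, retrying on -EAGAIN because another thread may have consumed the preallocated node in between. The pattern in isolation, assuming a caller-owned rwlock:

	#include <linux/idr.h>

	static int alloc_id_sketch(struct idr *idr, rwlock_t *lock,
				   void *ptr, int *id)
	{
		int ret;

		do {
			if (idr_pre_get(idr, GFP_KERNEL) == 0)
				return -ENOMEM;	/* preallocation failed */

			write_lock(lock);
			/* start at 1 so that id 0 stays invalid */
			ret = idr_get_new_above(idr, ptr, 1, id);
			write_unlock(lock);
		} while (ret == -EAGAIN);	/* raced: preallocate again */

		return ret;
	}

The new delay_id flag defers this allocation: surfaces are initialized with delay_id set and only receive an id at first validation (see vmw_surface_do_validate() further down), so a surface only consumes a hardware id while it is actually validated.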
147 | /** | 209 | /** |
148 | * vmw_resource_activate | 210 | * vmw_resource_activate |
149 | * | 211 | * |
@@ -198,8 +260,12 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
198 | struct { | 260 | struct { |
199 | SVGA3dCmdHeader header; | 261 | SVGA3dCmdHeader header; |
200 | SVGA3dCmdDestroyContext body; | 262 | SVGA3dCmdDestroyContext body; |
201 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 263 | } *cmd; |
202 | 264 | ||
265 | |||
266 | vmw_execbuf_release_pinned_bo(dev_priv, true, res->id); | ||
267 | |||
268 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
203 | if (unlikely(cmd == NULL)) { | 269 | if (unlikely(cmd == NULL)) { |
204 | DRM_ERROR("Failed reserving FIFO space for surface " | 270 | DRM_ERROR("Failed reserving FIFO space for surface " |
205 | "destruction.\n"); | 271 | "destruction.\n"); |
@@ -211,7 +277,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
211 | cmd->body.cid = cpu_to_le32(res->id); | 277 | cmd->body.cid = cpu_to_le32(res->id); |
212 | 278 | ||
213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 279 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
214 | vmw_3d_resource_dec(dev_priv); | 280 | vmw_3d_resource_dec(dev_priv, false); |
215 | } | 281 | } |
216 | 282 | ||
217 | static int vmw_context_init(struct vmw_private *dev_priv, | 283 | static int vmw_context_init(struct vmw_private *dev_priv, |
@@ -226,14 +292,17 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
226 | } *cmd; | 292 | } *cmd; |
227 | 293 | ||
228 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, | 294 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, |
229 | VMW_RES_CONTEXT, res_free); | 295 | VMW_RES_CONTEXT, false, res_free, NULL); |
230 | 296 | ||
231 | if (unlikely(ret != 0)) { | 297 | if (unlikely(ret != 0)) { |
232 | if (res_free == NULL) | 298 | DRM_ERROR("Failed to allocate a resource id.\n"); |
233 | kfree(res); | 299 | goto out_early; |
234 | else | 300 | } |
235 | res_free(res); | 301 | |
236 | return ret; | 302 | if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { |
303 | DRM_ERROR("Out of hw context ids.\n"); | ||
304 | vmw_resource_unreference(&res); | ||
305 | return -ENOMEM; | ||
237 | } | 306 | } |
238 | 307 | ||
239 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 308 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
@@ -248,9 +317,16 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
248 | cmd->body.cid = cpu_to_le32(res->id); | 317 | cmd->body.cid = cpu_to_le32(res->id); |
249 | 318 | ||
250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 319 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
251 | (void) vmw_3d_resource_inc(dev_priv); | 320 | (void) vmw_3d_resource_inc(dev_priv, false); |
252 | vmw_resource_activate(res, vmw_hw_context_destroy); | 321 | vmw_resource_activate(res, vmw_hw_context_destroy); |
253 | return 0; | 322 | return 0; |
323 | |||
324 | out_early: | ||
325 | if (res_free == NULL) | ||
326 | kfree(res); | ||
327 | else | ||
328 | res_free(res); | ||
329 | return ret; | ||
254 | } | 330 | } |
255 | 331 | ||
256 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | 332 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) |
@@ -273,8 +349,11 @@ static void vmw_user_context_free(struct vmw_resource *res) | |||
273 | { | 349 | { |
274 | struct vmw_user_context *ctx = | 350 | struct vmw_user_context *ctx = |
275 | container_of(res, struct vmw_user_context, res); | 351 | container_of(res, struct vmw_user_context, res); |
352 | struct vmw_private *dev_priv = res->dev_priv; | ||
276 | 353 | ||
277 | kfree(ctx); | 354 | kfree(ctx); |
355 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
356 | vmw_user_context_size); | ||
278 | } | 357 | } |
279 | 358 | ||
280 | /** | 359 | /** |
@@ -328,23 +407,56 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
328 | struct drm_file *file_priv) | 407 | struct drm_file *file_priv) |
329 | { | 408 | { |
330 | struct vmw_private *dev_priv = vmw_priv(dev); | 409 | struct vmw_private *dev_priv = vmw_priv(dev); |
331 | struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | 410 | struct vmw_user_context *ctx; |
332 | struct vmw_resource *res; | 411 | struct vmw_resource *res; |
333 | struct vmw_resource *tmp; | 412 | struct vmw_resource *tmp; |
334 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | 413 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
335 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 414 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
415 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
336 | int ret; | 416 | int ret; |
337 | 417 | ||
338 | if (unlikely(ctx == NULL)) | 418 | |
339 | return -ENOMEM; | 419 | /* |
420 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
421 | * by the maximum number of contexts anyway. | ||
422 | */ | ||
423 | |||
424 | if (unlikely(vmw_user_context_size == 0)) | ||
425 | vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; | ||
426 | |||
427 | ret = ttm_read_lock(&vmaster->lock, true); | ||
428 | if (unlikely(ret != 0)) | ||
429 | return ret; | ||
430 | |||
431 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
432 | vmw_user_context_size, | ||
433 | false, true); | ||
434 | if (unlikely(ret != 0)) { | ||
435 | if (ret != -ERESTARTSYS) | ||
436 | DRM_ERROR("Out of graphics memory for context" | ||
437 | " creation.\n"); | ||
438 | goto out_unlock; | ||
439 | } | ||
440 | |||
441 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | ||
442 | if (unlikely(ctx == NULL)) { | ||
443 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
444 | vmw_user_context_size); | ||
445 | ret = -ENOMEM; | ||
446 | goto out_unlock; | ||
447 | } | ||
340 | 448 | ||
341 | res = &ctx->res; | 449 | res = &ctx->res; |
342 | ctx->base.shareable = false; | 450 | ctx->base.shareable = false; |
343 | ctx->base.tfile = NULL; | 451 | ctx->base.tfile = NULL; |
344 | 452 | ||
453 | /* | ||
454 | * From here on, the destructor takes over resource freeing. | ||
455 | */ | ||
456 | |||
345 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); | 457 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); |
346 | if (unlikely(ret != 0)) | 458 | if (unlikely(ret != 0)) |
347 | return ret; | 459 | goto out_unlock; |
348 | 460 | ||
349 | tmp = vmw_resource_reference(&ctx->res); | 461 | tmp = vmw_resource_reference(&ctx->res); |
350 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, | 462 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, |
@@ -358,13 +470,16 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
358 | arg->cid = res->id; | 470 | arg->cid = res->id; |
359 | out_err: | 471 | out_err: |
360 | vmw_resource_unreference(&res); | 472 | vmw_resource_unreference(&res); |
473 | out_unlock: | ||
474 | ttm_read_unlock(&vmaster->lock); | ||
361 | return ret; | 475 | return ret; |
362 | 476 | ||
363 | } | 477 | } |
364 | 478 | ||
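vmw_context_define_ioctl() now charges the TTM global memory accountant before allocating anything, and every exit balances the books: the charge is released directly on kmalloc failure, while on success it is handed to the destructor (vmw_user_context_free() frees the same vmw_user_context_size). The discipline, reduced to a sketch around a hypothetical object:

	/* charge first; nothing is held if this fails */
	ret = ttm_mem_global_alloc(glob, acct_size, false, true);
	if (unlikely(ret != 0))
		return ret;

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (unlikely(obj == NULL)) {
		ttm_mem_global_free(glob, acct_size);	/* undo the charge */
		return -ENOMEM;
	}
	/* from here on, the object's destructor releases acct_size */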
365 | int vmw_context_check(struct vmw_private *dev_priv, | 479 | int vmw_context_check(struct vmw_private *dev_priv, |
366 | struct ttm_object_file *tfile, | 480 | struct ttm_object_file *tfile, |
367 | int id) | 481 | int id, |
482 | struct vmw_resource **p_res) | ||
368 | { | 483 | { |
369 | struct vmw_resource *res; | 484 | struct vmw_resource *res; |
370 | int ret = 0; | 485 | int ret = 0; |
@@ -376,6 +491,8 @@ int vmw_context_check(struct vmw_private *dev_priv, | |||
376 | container_of(res, struct vmw_user_context, res); | 491 | container_of(res, struct vmw_user_context, res); |
377 | if (ctx->base.tfile != tfile && !ctx->base.shareable) | 492 | if (ctx->base.tfile != tfile && !ctx->base.shareable) |
378 | ret = -EPERM; | 493 | ret = -EPERM; |
494 | if (p_res) | ||
495 | *p_res = vmw_resource_reference(res); | ||
379 | } else | 496 | } else |
380 | ret = -EINVAL; | 497 | ret = -EINVAL; |
381 | read_unlock(&dev_priv->resource_lock); | 498 | read_unlock(&dev_priv->resource_lock); |
@@ -383,102 +500,638 @@ int vmw_context_check(struct vmw_private *dev_priv, | |||
383 | return ret; | 500 | return ret; |
384 | } | 501 | } |
385 | 502 | ||
503 | struct vmw_bpp { | ||
504 | uint8_t bpp; | ||
505 | uint8_t s_bpp; | ||
506 | }; | ||
507 | |||
508 | /* | ||
509 | * Size table for the supported SVGA3D surface formats. Each entry holds | ||
510 | * two values: the bpp value and the s_bpp value, which is short for | ||
511 | * "stride bits per pixel". The values are given in such a way that the | ||
512 | * minimum stride for the image is calculated using | ||
513 | * | ||
514 | * min_stride = w*s_bpp | ||
515 | * | ||
516 | * and the total memory requirement for the image is | ||
517 | * | ||
518 | * h*min_stride*bpp/s_bpp | ||
519 | * | ||
520 | */ | ||
521 | static const struct vmw_bpp vmw_sf_bpp[] = { | ||
522 | [SVGA3D_FORMAT_INVALID] = {0, 0}, | ||
523 | [SVGA3D_X8R8G8B8] = {32, 32}, | ||
524 | [SVGA3D_A8R8G8B8] = {32, 32}, | ||
525 | [SVGA3D_R5G6B5] = {16, 16}, | ||
526 | [SVGA3D_X1R5G5B5] = {16, 16}, | ||
527 | [SVGA3D_A1R5G5B5] = {16, 16}, | ||
528 | [SVGA3D_A4R4G4B4] = {16, 16}, | ||
529 | [SVGA3D_Z_D32] = {32, 32}, | ||
530 | [SVGA3D_Z_D16] = {16, 16}, | ||
531 | [SVGA3D_Z_D24S8] = {32, 32}, | ||
532 | [SVGA3D_Z_D15S1] = {16, 16}, | ||
533 | [SVGA3D_LUMINANCE8] = {8, 8}, | ||
534 | [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8}, | ||
535 | [SVGA3D_LUMINANCE16] = {16, 16}, | ||
536 | [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16}, | ||
537 | [SVGA3D_DXT1] = {4, 16}, | ||
538 | [SVGA3D_DXT2] = {8, 32}, | ||
539 | [SVGA3D_DXT3] = {8, 32}, | ||
540 | [SVGA3D_DXT4] = {8, 32}, | ||
541 | [SVGA3D_DXT5] = {8, 32}, | ||
542 | [SVGA3D_BUMPU8V8] = {16, 16}, | ||
543 | [SVGA3D_BUMPL6V5U5] = {16, 16}, | ||
544 | [SVGA3D_BUMPX8L8V8U8] = {32, 32}, | ||
545 | [SVGA3D_ARGB_S10E5] = {16, 16}, | ||
546 | [SVGA3D_ARGB_S23E8] = {32, 32}, | ||
547 | [SVGA3D_A2R10G10B10] = {32, 32}, | ||
548 | [SVGA3D_V8U8] = {16, 16}, | ||
549 | [SVGA3D_Q8W8V8U8] = {32, 32}, | ||
550 | [SVGA3D_CxV8U8] = {16, 16}, | ||
551 | [SVGA3D_X8L8V8U8] = {32, 32}, | ||
552 | [SVGA3D_A2W10V10U10] = {32, 32}, | ||
553 | [SVGA3D_ALPHA8] = {8, 8}, | ||
554 | [SVGA3D_R_S10E5] = {16, 16}, | ||
555 | [SVGA3D_R_S23E8] = {32, 32}, | ||
556 | [SVGA3D_RG_S10E5] = {16, 16}, | ||
557 | [SVGA3D_RG_S23E8] = {32, 32}, | ||
558 | [SVGA3D_BUFFER] = {8, 8}, | ||
559 | [SVGA3D_Z_D24X8] = {32, 32}, | ||
560 | [SVGA3D_V16U16] = {32, 32}, | ||
561 | [SVGA3D_G16R16] = {32, 32}, | ||
562 | [SVGA3D_A16B16G16R16] = {64, 64}, | ||
563 | [SVGA3D_UYVY] = {12, 12}, | ||
564 | [SVGA3D_YUY2] = {12, 12}, | ||
565 | [SVGA3D_NV12] = {12, 8}, | ||
566 | [SVGA3D_AYUV] = {32, 32}, | ||
567 | [SVGA3D_BC4_UNORM] = {4, 16}, | ||
568 | [SVGA3D_BC5_UNORM] = {8, 32}, | ||
569 | [SVGA3D_Z_DF16] = {16, 16}, | ||
570 | [SVGA3D_Z_DF24] = {24, 24}, | ||
571 | [SVGA3D_Z_D24S8_INT] = {32, 32} | ||
572 | }; | ||
573 | |||
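A worked example of the two formulas (numbers chosen here, not from the source): SVGA3D_NV12 has {bpp = 12, s_bpp = 8}, so for a 640x480 surface the minimum stride is 640 * 8 = 5120 bits, i.e. 640 bytes, which is exactly how vmw_surface_dma_encode() below computes guest.pitch; the total requirement is 480 * 640 * 12 / 8 = 460800 bytes, or 1.5 bytes per pixel for the luma plane plus the half-resolution interleaved chroma plane.

	uint32_t pitch = (640 * 8 + 7) >> 3;	/* min stride: 640 bytes */
	uint32_t total = pitch * 480 * 12 / 8;	/* 460800 bytes overall  */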
386 | 574 | ||
387 | /** | 575 | /** |
388 | * Surface management. | 576 | * Surface management. |
389 | */ | 577 | */ |
390 | 578 | ||
579 | struct vmw_surface_dma { | ||
580 | SVGA3dCmdHeader header; | ||
581 | SVGA3dCmdSurfaceDMA body; | ||
582 | SVGA3dCopyBox cb; | ||
583 | SVGA3dCmdSurfaceDMASuffix suffix; | ||
584 | }; | ||
585 | |||
586 | struct vmw_surface_define { | ||
587 | SVGA3dCmdHeader header; | ||
588 | SVGA3dCmdDefineSurface body; | ||
589 | }; | ||
590 | |||
591 | struct vmw_surface_destroy { | ||
592 | SVGA3dCmdHeader header; | ||
593 | SVGA3dCmdDestroySurface body; | ||
594 | }; | ||
595 | |||
596 | |||
597 | /** | ||
598 | * vmw_surface_dma_size - Compute fifo size for a dma command. | ||
599 | * | ||
600 | * @srf: Pointer to a struct vmw_surface | ||
601 | * | ||
602 | * Computes the required size for a surface dma command for backup or | ||
603 | * restoration of the surface represented by @srf. | ||
604 | */ | ||
605 | static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) | ||
606 | { | ||
607 | return srf->num_sizes * sizeof(struct vmw_surface_dma); | ||
608 | } | ||
609 | |||
610 | |||
611 | /** | ||
612 | * vmw_surface_define_size - Compute fifo size for a surface define command. | ||
613 | * | ||
614 | * @srf: Pointer to a struct vmw_surface | ||
615 | * | ||
616 | * Computes the required size for a surface define command for the definition | ||
617 | * of the surface represented by @srf. | ||
618 | */ | ||
619 | static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) | ||
620 | { | ||
621 | return sizeof(struct vmw_surface_define) + srf->num_sizes * | ||
622 | sizeof(SVGA3dSize); | ||
623 | } | ||
624 | |||
625 | |||
626 | /** | ||
627 | * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. | ||
628 | * | ||
629 | * Computes the required size for a surface destroy command for the destruction | ||
630 | * of a hw surface. | ||
631 | */ | ||
632 | static inline uint32_t vmw_surface_destroy_size(void) | ||
633 | { | ||
634 | return sizeof(struct vmw_surface_destroy); | ||
635 | } | ||
636 | |||
637 | /** | ||
638 | * vmw_surface_destroy_encode - Encode a surface_destroy command. | ||
639 | * | ||
640 | * @id: The surface id | ||
641 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
642 | */ | ||
643 | static void vmw_surface_destroy_encode(uint32_t id, | ||
644 | void *cmd_space) | ||
645 | { | ||
646 | struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) | ||
647 | cmd_space; | ||
648 | |||
649 | cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; | ||
650 | cmd->header.size = sizeof(cmd->body); | ||
651 | cmd->body.sid = id; | ||
652 | } | ||
653 | |||
654 | /** | ||
655 | * vmw_surface_define_encode - Encode a surface_define command. | ||
656 | * | ||
657 | * @srf: Pointer to a struct vmw_surface object. | ||
658 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
659 | */ | ||
660 | static void vmw_surface_define_encode(const struct vmw_surface *srf, | ||
661 | void *cmd_space) | ||
662 | { | ||
663 | struct vmw_surface_define *cmd = (struct vmw_surface_define *) | ||
664 | cmd_space; | ||
665 | struct drm_vmw_size *src_size; | ||
666 | SVGA3dSize *cmd_size; | ||
667 | uint32_t cmd_len; | ||
668 | int i; | ||
669 | |||
670 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | ||
671 | |||
672 | cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; | ||
673 | cmd->header.size = cmd_len; | ||
674 | cmd->body.sid = srf->res.id; | ||
675 | cmd->body.surfaceFlags = srf->flags; | ||
676 | cmd->body.format = cpu_to_le32(srf->format); | ||
677 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
678 | cmd->body.face[i].numMipLevels = srf->mip_levels[i]; | ||
679 | |||
680 | cmd += 1; | ||
681 | cmd_size = (SVGA3dSize *) cmd; | ||
682 | src_size = srf->sizes; | ||
683 | |||
684 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | ||
685 | cmd_size->width = src_size->width; | ||
686 | cmd_size->height = src_size->height; | ||
687 | cmd_size->depth = src_size->depth; | ||
688 | } | ||
689 | } | ||
690 | |||
691 | |||
692 | /** | ||
693 | * vmw_surface_dma_encode - Encode a surface_dma command. | ||
694 | * | ||
695 | * @srf: Pointer to a struct vmw_surface object. | ||
696 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
697 | * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents | ||
698 | * should be placed or read from. | ||
699 | * @to_surface: Boolean whether to DMA to the surface or from the surface. | ||
700 | */ | ||
701 | static void vmw_surface_dma_encode(struct vmw_surface *srf, | ||
702 | void *cmd_space, | ||
703 | const SVGAGuestPtr *ptr, | ||
704 | bool to_surface) | ||
705 | { | ||
706 | uint32_t i; | ||
707 | uint32_t bpp = vmw_sf_bpp[srf->format].bpp; | ||
708 | uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
709 | struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; | ||
710 | |||
711 | for (i = 0; i < srf->num_sizes; ++i) { | ||
712 | SVGA3dCmdHeader *header = &cmd->header; | ||
713 | SVGA3dCmdSurfaceDMA *body = &cmd->body; | ||
714 | SVGA3dCopyBox *cb = &cmd->cb; | ||
715 | SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; | ||
716 | const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; | ||
717 | const struct drm_vmw_size *cur_size = &srf->sizes[i]; | ||
718 | |||
719 | header->id = SVGA_3D_CMD_SURFACE_DMA; | ||
720 | header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); | ||
721 | |||
722 | body->guest.ptr = *ptr; | ||
723 | body->guest.ptr.offset += cur_offset->bo_offset; | ||
724 | body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3; | ||
725 | body->host.sid = srf->res.id; | ||
726 | body->host.face = cur_offset->face; | ||
727 | body->host.mipmap = cur_offset->mip; | ||
728 | body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : | ||
729 | SVGA3D_READ_HOST_VRAM); | ||
730 | cb->x = 0; | ||
731 | cb->y = 0; | ||
732 | cb->z = 0; | ||
733 | cb->srcx = 0; | ||
734 | cb->srcy = 0; | ||
735 | cb->srcz = 0; | ||
736 | cb->w = cur_size->width; | ||
737 | cb->h = cur_size->height; | ||
738 | cb->d = cur_size->depth; | ||
739 | |||
740 | suffix->suffixSize = sizeof(*suffix); | ||
741 | suffix->maximumOffset = body->guest.pitch*cur_size->height* | ||
742 | cur_size->depth*bpp / stride_bpp; | ||
743 | suffix->flags.discard = 0; | ||
744 | suffix->flags.unsynchronized = 0; | ||
745 | suffix->flags.reserved = 0; | ||
746 | ++cmd; | ||
747 | } | ||
748 | }; | ||
749 | |||
750 | |||
391 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | 751 | static void vmw_hw_surface_destroy(struct vmw_resource *res) |
392 | { | 752 | { |
393 | 753 | ||
394 | struct vmw_private *dev_priv = res->dev_priv; | 754 | struct vmw_private *dev_priv = res->dev_priv; |
395 | struct { | 755 | struct vmw_surface *srf; |
396 | SVGA3dCmdHeader header; | 756 | void *cmd; |
397 | SVGA3dCmdDestroySurface body; | ||
398 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
399 | 757 | ||
400 | if (unlikely(cmd == NULL)) { | 758 | if (res->id != -1) { |
401 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
402 | "destruction.\n"); | ||
403 | return; | ||
404 | } | ||
405 | 759 | ||
406 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY); | 760 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
407 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | 761 | if (unlikely(cmd == NULL)) { |
408 | cmd->body.sid = cpu_to_le32(res->id); | 762 | DRM_ERROR("Failed reserving FIFO space for surface " |
763 | "destruction.\n"); | ||
764 | return; | ||
765 | } | ||
409 | 766 | ||
410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 767 | vmw_surface_destroy_encode(res->id, cmd); |
411 | vmw_3d_resource_dec(dev_priv); | 768 | vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); |
769 | |||
770 | /* | ||
771 | * TODO: Use an atomic used_memory_size, or a separate lock, | ||
772 | * to avoid taking dev_priv::cmdbuf_mutex in | ||
773 | * the destroy path. | ||
774 | */ | ||
775 | |||
776 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
777 | srf = container_of(res, struct vmw_surface, res); | ||
778 | dev_priv->used_memory_size -= srf->backup_size; | ||
779 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
780 | |||
781 | } | ||
782 | vmw_3d_resource_dec(dev_priv, false); | ||
412 | } | 783 | } |
413 | 784 | ||
414 | void vmw_surface_res_free(struct vmw_resource *res) | 785 | void vmw_surface_res_free(struct vmw_resource *res) |
415 | { | 786 | { |
416 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | 787 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); |
417 | 788 | ||
789 | if (srf->backup) | ||
790 | ttm_bo_unref(&srf->backup); | ||
791 | kfree(srf->offsets); | ||
418 | kfree(srf->sizes); | 792 | kfree(srf->sizes); |
419 | kfree(srf->snooper.image); | 793 | kfree(srf->snooper.image); |
420 | kfree(srf); | 794 | kfree(srf); |
421 | } | 795 | } |
422 | 796 | ||
423 | int vmw_surface_init(struct vmw_private *dev_priv, | 797 | |
424 | struct vmw_surface *srf, | 798 | /** |
425 | void (*res_free) (struct vmw_resource *res)) | 799 | * vmw_surface_do_validate - make a surface available to the device. |
800 | * | ||
801 | * @dev_priv: Pointer to a device private struct. | ||
802 | * @srf: Pointer to a struct vmw_surface. | ||
803 | * | ||
804 | * If the surface doesn't have a hw id, allocate one, and optionally | ||
805 | * DMA the backed up surface contents to the device. | ||
806 | * | ||
807 | * Returns -EBUSY if there weren't sufficient device resources to | ||
808 | * complete the validation. Retry after freeing up resources. | ||
809 | * | ||
810 | * May return other errors if the kernel is out of guest resources. | ||
811 | */ | ||
812 | int vmw_surface_do_validate(struct vmw_private *dev_priv, | ||
813 | struct vmw_surface *srf) | ||
426 | { | 814 | { |
427 | int ret; | ||
428 | struct { | ||
429 | SVGA3dCmdHeader header; | ||
430 | SVGA3dCmdDefineSurface body; | ||
431 | } *cmd; | ||
432 | SVGA3dSize *cmd_size; | ||
433 | struct vmw_resource *res = &srf->res; | 815 | struct vmw_resource *res = &srf->res; |
434 | struct drm_vmw_size *src_size; | 816 | struct list_head val_list; |
435 | size_t submit_size; | 817 | struct ttm_validate_buffer val_buf; |
436 | uint32_t cmd_len; | 818 | uint32_t submit_size; |
437 | int i; | 819 | uint8_t *cmd; |
820 | int ret; | ||
438 | 821 | ||
439 | BUG_ON(res_free == NULL); | 822 | if (likely(res->id != -1)) |
440 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | 823 | return 0; |
441 | VMW_RES_SURFACE, res_free); | 824 | |
825 | if (unlikely(dev_priv->used_memory_size + srf->backup_size >= | ||
826 | dev_priv->memory_size)) | ||
827 | return -EBUSY; | ||
828 | |||
829 | /* | ||
830 | * Reserve- and validate the backup DMA bo. | ||
831 | */ | ||
832 | |||
833 | if (srf->backup) { | ||
834 | INIT_LIST_HEAD(&val_list); | ||
835 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
836 | val_buf.new_sync_obj_arg = (void *)((unsigned long) | ||
837 | DRM_VMW_FENCE_FLAG_EXEC); | ||
838 | list_add_tail(&val_buf.head, &val_list); | ||
839 | ret = ttm_eu_reserve_buffers(&val_list); | ||
840 | if (unlikely(ret != 0)) | ||
841 | goto out_no_reserve; | ||
842 | |||
843 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
844 | true, false, false); | ||
845 | if (unlikely(ret != 0)) | ||
846 | goto out_no_validate; | ||
847 | } | ||
848 | |||
849 | /* | ||
850 | * Alloc id for the resource. | ||
851 | */ | ||
442 | 852 | ||
853 | ret = vmw_resource_alloc_id(dev_priv, res); | ||
443 | if (unlikely(ret != 0)) { | 854 | if (unlikely(ret != 0)) { |
444 | res_free(res); | 855 | DRM_ERROR("Failed to allocate a surface id.\n"); |
445 | return ret; | 856 | goto out_no_id; |
857 | } | ||
858 | if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { | ||
859 | ret = -EBUSY; | ||
860 | goto out_no_fifo; | ||
446 | } | 861 | } |
447 | 862 | ||
448 | submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize); | 863 | |
449 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | 864 | /* |
865 | * Encode surface define- and dma commands. | ||
866 | */ | ||
867 | |||
868 | submit_size = vmw_surface_define_size(srf); | ||
869 | if (srf->backup) | ||
870 | submit_size += vmw_surface_dma_size(srf); | ||
450 | 871 | ||
451 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 872 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
452 | if (unlikely(cmd == NULL)) { | 873 | if (unlikely(cmd == NULL)) { |
453 | DRM_ERROR("Fifo reserve failed for create surface.\n"); | 874 | DRM_ERROR("Failed reserving FIFO space for surface " |
454 | vmw_resource_unreference(&res); | 875 | "validation.\n"); |
455 | return -ENOMEM; | 876 | ret = -ENOMEM; |
877 | goto out_no_fifo; | ||
456 | } | 878 | } |
457 | 879 | ||
458 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE); | 880 | vmw_surface_define_encode(srf, cmd); |
459 | cmd->header.size = cpu_to_le32(cmd_len); | 881 | if (srf->backup) { |
460 | cmd->body.sid = cpu_to_le32(res->id); | 882 | SVGAGuestPtr ptr; |
461 | cmd->body.surfaceFlags = cpu_to_le32(srf->flags); | 883 | |
462 | cmd->body.format = cpu_to_le32(srf->format); | 884 | cmd += vmw_surface_define_size(srf); |
463 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | 885 | vmw_bo_get_guest_ptr(srf->backup, &ptr); |
464 | cmd->body.face[i].numMipLevels = | 886 | vmw_surface_dma_encode(srf, cmd, &ptr, true); |
465 | cpu_to_le32(srf->mip_levels[i]); | ||
466 | } | 887 | } |
467 | 888 | ||
468 | cmd += 1; | 889 | vmw_fifo_commit(dev_priv, submit_size); |
469 | cmd_size = (SVGA3dSize *) cmd; | ||
470 | src_size = srf->sizes; | ||
471 | 890 | ||
472 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | 891 | /* |
473 | cmd_size->width = cpu_to_le32(src_size->width); | 892 | * Create a fence object and fence the backup buffer. |
474 | cmd_size->height = cpu_to_le32(src_size->height); | 893 | */ |
475 | cmd_size->depth = cpu_to_le32(src_size->depth); | 894 | |
895 | if (srf->backup) { | ||
896 | struct vmw_fence_obj *fence; | ||
897 | |||
898 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
899 | &fence, NULL); | ||
900 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
901 | if (likely(fence != NULL)) | ||
902 | vmw_fence_obj_unreference(&fence); | ||
903 | ttm_bo_unref(&val_buf.bo); | ||
904 | ttm_bo_unref(&srf->backup); | ||
905 | } | ||
906 | |||
907 | /* | ||
908 | * Surface memory usage accounting. | ||
909 | */ | ||
910 | |||
911 | dev_priv->used_memory_size += srf->backup_size; | ||
912 | |||
913 | return 0; | ||
914 | |||
915 | out_no_fifo: | ||
916 | vmw_resource_release_id(res); | ||
917 | out_no_id: | ||
918 | out_no_validate: | ||
919 | if (srf->backup) | ||
920 | ttm_eu_backoff_reservation(&val_list); | ||
921 | out_no_reserve: | ||
922 | if (srf->backup) | ||
923 | ttm_bo_unref(&val_buf.bo); | ||
924 | return ret; | ||
925 | } | ||
926 | |||
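The backup-buffer handling in vmw_surface_do_validate() follows TTM's execbuf-util lifecycle: reserve the buffer list, validate the buffer into a DMA-able placement, submit the FIFO commands that reference it, then fence the buffers, which also drops the reservations. Condensed from the function above, with error unwinding elided:

	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(srf->backup);
	list_add_tail(&val_buf.head, &val_list);

	ret = ttm_eu_reserve_buffers(&val_list);	  /* 1. lock buffers */
	ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
			      true, false, false);	  /* 2. place buffer */
	/* 3. encode and commit the FIFO commands that reference the bo */
	ttm_eu_fence_buffer_objects(&val_list, fence);	  /* 4. fence+unlock */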
927 | /** | ||
928 | * vmw_surface_evict - Evict a hw surface. | ||
929 | * | ||
930 | * @dev_priv: Pointer to a device private struct. | ||
931 | * @srf: Pointer to a struct vmw_surface | ||
932 | * | ||
933 | * DMA the contents of a hw surface to a backup guest buffer object, | ||
934 | * and destroy the hw surface, releasing its id. | ||
935 | */ | ||
936 | int vmw_surface_evict(struct vmw_private *dev_priv, | ||
937 | struct vmw_surface *srf) | ||
938 | { | ||
939 | struct vmw_resource *res = &srf->res; | ||
940 | struct list_head val_list; | ||
941 | struct ttm_validate_buffer val_buf; | ||
942 | uint32_t submit_size; | ||
943 | uint8_t *cmd; | ||
944 | int ret; | ||
945 | struct vmw_fence_obj *fence; | ||
946 | SVGAGuestPtr ptr; | ||
947 | |||
948 | BUG_ON(res->id == -1); | ||
949 | |||
950 | /* | ||
951 | * Create a surface backup buffer object. | ||
952 | */ | ||
953 | |||
954 | if (!srf->backup) { | ||
955 | ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, | ||
956 | ttm_bo_type_device, | ||
957 | &vmw_srf_placement, 0, 0, true, | ||
958 | NULL, &srf->backup); | ||
959 | if (unlikely(ret != 0)) | ||
960 | return ret; | ||
961 | } | ||
962 | |||
963 | /* | ||
964 | * Reserve- and validate the backup DMA bo. | ||
965 | */ | ||
966 | |||
967 | INIT_LIST_HEAD(&val_list); | ||
968 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
969 | val_buf.new_sync_obj_arg = (void *)(unsigned long) | ||
970 | DRM_VMW_FENCE_FLAG_EXEC; | ||
971 | list_add_tail(&val_buf.head, &val_list); | ||
972 | ret = ttm_eu_reserve_buffers(&val_list); | ||
973 | if (unlikely(ret != 0)) | ||
974 | goto out_no_reserve; | ||
975 | |||
976 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
977 | true, false, false); | ||
978 | if (unlikely(ret != 0)) | ||
979 | goto out_no_validate; | ||
980 | |||
981 | |||
982 | /* | ||
983 | * Encode the dma- and surface destroy commands. | ||
984 | */ | ||
985 | |||
986 | submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); | ||
987 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
988 | if (unlikely(cmd == NULL)) { | ||
989 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
990 | "eviction.\n"); | ||
991 | ret = -ENOMEM; | ||
992 | goto out_no_fifo; | ||
476 | } | 993 | } |
477 | 994 | ||
995 | vmw_bo_get_guest_ptr(srf->backup, &ptr); | ||
996 | vmw_surface_dma_encode(srf, cmd, &ptr, false); | ||
997 | cmd += vmw_surface_dma_size(srf); | ||
998 | vmw_surface_destroy_encode(res->id, cmd); | ||
478 | vmw_fifo_commit(dev_priv, submit_size); | 999 | vmw_fifo_commit(dev_priv, submit_size); |
479 | (void) vmw_3d_resource_inc(dev_priv); | 1000 | |
480 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 1001 | /* |
1002 | * Surface memory usage accounting. | ||
1003 | */ | ||
1004 | |||
1005 | dev_priv->used_memory_size -= srf->backup_size; | ||
1006 | |||
1007 | /* | ||
1008 | * Create a fence object and fence the DMA buffer. | ||
1009 | */ | ||
1010 | |||
1011 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
1012 | &fence, NULL); | ||
1013 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
1014 | if (likely(fence != NULL)) | ||
1015 | vmw_fence_obj_unreference(&fence); | ||
1016 | ttm_bo_unref(&val_buf.bo); | ||
1017 | |||
1018 | /* | ||
1019 | * Release the surface ID. | ||
1020 | */ | ||
1021 | |||
1022 | vmw_resource_release_id(res); | ||
1023 | |||
481 | return 0; | 1024 | return 0; |
1025 | |||
1026 | out_no_fifo: | ||
1027 | out_no_validate: | ||
1028 | if (srf->backup) | ||
1029 | ttm_eu_backoff_reservation(&val_list); | ||
1030 | out_no_reserve: | ||
1031 | ttm_bo_unref(&val_buf.bo); | ||
1032 | ttm_bo_unref(&srf->backup); | ||
1033 | return ret; | ||
1034 | } | ||
1035 | |||
1036 | |||
1037 | /** | ||
1038 | * vmw_surface_validate - make a surface available to the device, evicting | ||
1039 | * other surfaces if needed. | ||
1040 | * | ||
1041 | * @dev_priv: Pointer to a device private struct. | ||
1042 | * @srf: Pointer to a struct vmw_surface. | ||
1043 | * | ||
1044 | * Try to validate a surface, and if it fails due to limited device resources, | ||
1045 | * repeatedly try to evict other surfaces until the request can be | ||
1046 | * accommodated. | ||
1047 | * | ||
1048 | * May return errors if out of resources. | ||
1049 | */ | ||
1050 | int vmw_surface_validate(struct vmw_private *dev_priv, | ||
1051 | struct vmw_surface *srf) | ||
1052 | { | ||
1053 | int ret; | ||
1054 | struct vmw_surface *evict_srf; | ||
1055 | |||
1056 | do { | ||
1057 | write_lock(&dev_priv->resource_lock); | ||
1058 | list_del_init(&srf->lru_head); | ||
1059 | write_unlock(&dev_priv->resource_lock); | ||
1060 | |||
1061 | ret = vmw_surface_do_validate(dev_priv, srf); | ||
1062 | if (likely(ret != -EBUSY)) | ||
1063 | break; | ||
1064 | |||
1065 | write_lock(&dev_priv->resource_lock); | ||
1066 | if (list_empty(&dev_priv->surface_lru)) { | ||
1067 | DRM_ERROR("Out of device memory for surfaces.\n"); | ||
1068 | ret = -EBUSY; | ||
1069 | write_unlock(&dev_priv->resource_lock); | ||
1070 | break; | ||
1071 | } | ||
1072 | |||
1073 | evict_srf = vmw_surface_reference | ||
1074 | (list_first_entry(&dev_priv->surface_lru, | ||
1075 | struct vmw_surface, | ||
1076 | lru_head)); | ||
1077 | list_del_init(&evict_srf->lru_head); | ||
1078 | |||
1079 | write_unlock(&dev_priv->resource_lock); | ||
1080 | (void) vmw_surface_evict(dev_priv, evict_srf); | ||
1081 | |||
1082 | vmw_surface_unreference(&evict_srf); | ||
1083 | |||
1084 | } while (1); | ||
1085 | |||
1086 | if (unlikely(ret != 0 && srf->res.id != -1)) { | ||
1087 | write_lock(&dev_priv->resource_lock); | ||
1088 | list_add_tail(&srf->lru_head, &dev_priv->surface_lru); | ||
1089 | write_unlock(&dev_priv->resource_lock); | ||
1090 | } | ||
1091 | |||
1092 | return ret; | ||
1093 | } | ||
1094 | |||
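The loop above is a validate-or-evict policy against a surface LRU: try to validate, and on -EBUSY evict the least recently used surface and retry until validation succeeds, a hard error occurs, or the LRU runs dry. Its skeleton, with hypothetical helpers standing in for the locking and reference counting:

	for (;;) {
		ret = try_validate(srf);	/* vmw_surface_do_validate() */
		if (ret != -EBUSY)
			break;			/* success or hard error */

		victim = pick_lru_victim();	/* least recently used surface */
		if (victim == NULL) {
			ret = -EBUSY;		/* nothing left to evict */
			break;
		}
		evict(victim);			/* frees device memory; retry */
	}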
1095 | |||
1096 | /** | ||
1097 | * vmw_surface_remove_from_lists - Remove surface resources from lookup lists | ||
1098 | * | ||
1099 | * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface | ||
1100 | * | ||
1101 | * As part of the resource destruction, remove the surface from any | ||
1102 | * lookup lists. | ||
1103 | */ | ||
1104 | static void vmw_surface_remove_from_lists(struct vmw_resource *res) | ||
1105 | { | ||
1106 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
1107 | |||
1108 | list_del_init(&srf->lru_head); | ||
1109 | } | ||
1110 | |||
1111 | int vmw_surface_init(struct vmw_private *dev_priv, | ||
1112 | struct vmw_surface *srf, | ||
1113 | void (*res_free) (struct vmw_resource *res)) | ||
1114 | { | ||
1115 | int ret; | ||
1116 | struct vmw_resource *res = &srf->res; | ||
1117 | |||
1118 | BUG_ON(res_free == NULL); | ||
1119 | INIT_LIST_HEAD(&srf->lru_head); | ||
1120 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | ||
1121 | VMW_RES_SURFACE, true, res_free, | ||
1122 | vmw_surface_remove_from_lists); | ||
1123 | |||
1124 | if (unlikely(ret != 0)) | ||
1125 | res_free(res); | ||
1126 | |||
1127 | /* | ||
1128 | * The surface won't be visible to hardware until a | ||
1129 | * surface validate. | ||
1130 | */ | ||
1131 | |||
1132 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
1133 | vmw_resource_activate(res, vmw_hw_surface_destroy); | ||
1134 | return ret; | ||
482 | } | 1135 | } |
483 | 1136 | ||
484 | static void vmw_user_surface_free(struct vmw_resource *res) | 1137 | static void vmw_user_surface_free(struct vmw_resource *res) |
@@ -486,12 +1139,58 @@ static void vmw_user_surface_free(struct vmw_resource *res) | |||
486 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | 1139 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); |
487 | struct vmw_user_surface *user_srf = | 1140 | struct vmw_user_surface *user_srf = |
488 | container_of(srf, struct vmw_user_surface, srf); | 1141 | container_of(srf, struct vmw_user_surface, srf); |
1142 | struct vmw_private *dev_priv = srf->res.dev_priv; | ||
1143 | uint32_t size = user_srf->size; | ||
489 | 1144 | ||
1145 | if (srf->backup) | ||
1146 | ttm_bo_unref(&srf->backup); | ||
1147 | kfree(srf->offsets); | ||
490 | kfree(srf->sizes); | 1148 | kfree(srf->sizes); |
491 | kfree(srf->snooper.image); | 1149 | kfree(srf->snooper.image); |
492 | kfree(user_srf); | 1150 | kfree(user_srf); |
1151 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1152 | } | ||
1153 | |||
1154 | /** | ||
1155 | * vmw_resource_unreserve - unreserve resources previously reserved for | ||
1156 | * command submission. | ||
1157 | * | ||
1158 | * @list_head: list of resources to unreserve. | ||
1159 | * | ||
1160 | * Currently only surfaces are considered. Unreserving a surface | ||
1161 | * means putting it back on the device's surface LRU list, so that | ||
1162 | * it can be evicted if necessary. To that end, this function | ||
1163 | * traverses the resource list and, for each resource that is a | ||
1164 | * surface, moves the surface back to the tail of the device's | ||
1165 | * surface LRU list. | ||
1166 | */ | ||
1167 | void vmw_resource_unreserve(struct list_head *list) | ||
1168 | { | ||
1169 | struct vmw_resource *res; | ||
1170 | struct vmw_surface *srf; | ||
1171 | rwlock_t *lock = NULL; | ||
1172 | |||
1173 | list_for_each_entry(res, list, validate_head) { | ||
1174 | |||
1175 | if (res->res_free != &vmw_surface_res_free && | ||
1176 | res->res_free != &vmw_user_surface_free) | ||
1177 | continue; | ||
1178 | |||
1179 | if (unlikely(lock == NULL)) { | ||
1180 | lock = &res->dev_priv->resource_lock; | ||
1181 | write_lock(lock); | ||
1182 | } | ||
1183 | |||
1184 | srf = container_of(res, struct vmw_surface, res); | ||
1185 | list_del_init(&srf->lru_head); | ||
1186 | list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); | ||
1187 | } | ||
1188 | |||
1189 | if (lock != NULL) | ||
1190 | write_unlock(lock); | ||
493 | } | 1191 | } |
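One detail worth noting in the loop above is the lazy lock acquisition: the device's resource_lock is write-locked only once the first surface is found, and is then held until the walk finishes, so a list containing no surfaces never touches the lock. A minimal sketch of the idiom, assuming a hypothetical item type that, like vmw_resource/vmw_surface, has separate list memberships for traversal and for the LRU:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head validate_head;	/* membership in walked list */
		struct list_head lru_head;	/* membership in the LRU */
		rwlock_t *lock;			/* shared per-device lock */
		bool is_surface;
	};

	static void put_back_on_lru(struct list_head *list,
				    struct list_head *lru)
	{
		struct item *it;
		rwlock_t *lock = NULL;

		list_for_each_entry(it, list, validate_head) {
			if (!it->is_surface)
				continue;

			if (lock == NULL) {	/* first hit: lock once */
				lock = it->lock;
				write_lock(lock);
			}
			/* tail insertion marks it most recently used */
			list_move_tail(&it->lru_head, lru);
		}

		if (lock != NULL)
			write_unlock(lock);
	}

list_move_tail() is the idiomatic combination of the list_del_init()/list_add_tail() pair used above; the walk stays safe because it iterates over validate_head while only lru_head is relinked.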
494 | 1192 | ||
1193 | |||
495 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, | 1194 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, |
496 | struct ttm_object_file *tfile, | 1195 | struct ttm_object_file *tfile, |
497 | uint32_t handle, struct vmw_surface **out) | 1196 | uint32_t handle, struct vmw_surface **out) |
@@ -556,8 +1255,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
556 | struct drm_file *file_priv) | 1255 | struct drm_file *file_priv) |
557 | { | 1256 | { |
558 | struct vmw_private *dev_priv = vmw_priv(dev); | 1257 | struct vmw_private *dev_priv = vmw_priv(dev); |
559 | struct vmw_user_surface *user_srf = | 1258 | struct vmw_user_surface *user_srf; |
560 | kmalloc(sizeof(*user_srf), GFP_KERNEL); | ||
561 | struct vmw_surface *srf; | 1259 | struct vmw_surface *srf; |
562 | struct vmw_resource *res; | 1260 | struct vmw_resource *res; |
563 | struct vmw_resource *tmp; | 1261 | struct vmw_resource *tmp; |
@@ -568,10 +1266,51 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
568 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 1266 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
569 | struct drm_vmw_size __user *user_sizes; | 1267 | struct drm_vmw_size __user *user_sizes; |
570 | int ret; | 1268 | int ret; |
571 | int i; | 1269 | int i, j; |
1270 | uint32_t cur_bo_offset; | ||
1271 | struct drm_vmw_size *cur_size; | ||
1272 | struct vmw_surface_offset *cur_offset; | ||
1273 | uint32_t stride_bpp; | ||
1274 | uint32_t bpp; | ||
1275 | uint32_t num_sizes; | ||
1276 | uint32_t size; | ||
1277 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
572 | 1278 | ||
573 | if (unlikely(user_srf == NULL)) | 1279 | if (unlikely(vmw_user_surface_size == 0)) |
574 | return -ENOMEM; | 1280 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + |
1281 | 128; | ||
1282 | |||
1283 | num_sizes = 0; | ||
1284 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
1285 | num_sizes += req->mip_levels[i]; | ||
1286 | |||
1287 | if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * | ||
1288 | DRM_VMW_MAX_MIP_LEVELS) | ||
1289 | return -EINVAL; | ||
1290 | |||
1291 | size = vmw_user_surface_size + 128 + | ||
1292 | ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + | ||
1293 | ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); | ||
1294 | |||
1295 | |||
1296 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1297 | if (unlikely(ret != 0)) | ||
1298 | return ret; | ||
1299 | |||
1300 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
1301 | size, false, true); | ||
1302 | if (unlikely(ret != 0)) { | ||
1303 | if (ret != -ERESTARTSYS) | ||
1304 | DRM_ERROR("Out of graphics memory for surface" | ||
1305 | " creation.\n"); | ||
1306 | goto out_unlock; | ||
1307 | } | ||
1308 | |||
1309 | user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL); | ||
1310 | if (unlikely(user_srf == NULL)) { | ||
1311 | ret = -ENOMEM; | ||
1312 | goto out_no_user_srf; | ||
1313 | } | ||
575 | 1314 | ||
576 | srf = &user_srf->srf; | 1315 | srf = &user_srf->srf; |
577 | res = &srf->res; | 1316 | res = &srf->res; |
@@ -579,21 +1318,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
579 | srf->flags = req->flags; | 1318 | srf->flags = req->flags; |
580 | srf->format = req->format; | 1319 | srf->format = req->format; |
581 | srf->scanout = req->scanout; | 1320 | srf->scanout = req->scanout; |
582 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 1321 | srf->backup = NULL; |
583 | srf->num_sizes = 0; | ||
584 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
585 | srf->num_sizes += srf->mip_levels[i]; | ||
586 | 1322 | ||
587 | if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES * | 1323 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
588 | DRM_VMW_MAX_MIP_LEVELS) { | 1324 | srf->num_sizes = num_sizes; |
589 | ret = -EINVAL; | 1325 | user_srf->size = size; |
590 | goto out_err0; | ||
591 | } | ||
592 | 1326 | ||
593 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | 1327 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); |
594 | if (unlikely(srf->sizes == NULL)) { | 1328 | if (unlikely(srf->sizes == NULL)) { |
595 | ret = -ENOMEM; | 1329 | ret = -ENOMEM; |
596 | goto out_err0; | 1330 | goto out_no_sizes; |
1331 | } | ||
1332 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | ||
1333 | GFP_KERNEL); | ||
1334 | if (unlikely(srf->offsets == NULL)) { | ||
1335 | ret = -ENOMEM; | ||
1336 | goto out_no_offsets; | ||
597 | } | 1337 | } |
598 | 1338 | ||
599 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | 1339 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) |
@@ -603,9 +1343,32 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
603 | srf->num_sizes * sizeof(*srf->sizes)); | 1343 | srf->num_sizes * sizeof(*srf->sizes)); |
604 | if (unlikely(ret != 0)) { | 1344 | if (unlikely(ret != 0)) { |
605 | ret = -EFAULT; | 1345 | ret = -EFAULT; |
606 | goto out_err1; | 1346 | goto out_no_copy; |
607 | } | 1347 | } |
608 | 1348 | ||
1349 | cur_bo_offset = 0; | ||
1350 | cur_offset = srf->offsets; | ||
1351 | cur_size = srf->sizes; | ||
1352 | |||
1353 | bpp = vmw_sf_bpp[srf->format].bpp; | ||
1354 | stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
1355 | |||
1356 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
1357 | for (j = 0; j < srf->mip_levels[i]; ++j) { | ||
1358 | uint32_t stride = | ||
1359 | (cur_size->width * stride_bpp + 7) >> 3; | ||
1360 | |||
1361 | cur_offset->face = i; | ||
1362 | cur_offset->mip = j; | ||
1363 | cur_offset->bo_offset = cur_bo_offset; | ||
1364 | cur_bo_offset += stride * cur_size->height * | ||
1365 | cur_size->depth * bpp / stride_bpp; | ||
1366 | ++cur_offset; | ||
1367 | ++cur_size; | ||
1368 | } | ||
1369 | } | ||
1370 | srf->backup_size = cur_bo_offset; | ||
1371 | |||
609 | if (srf->scanout && | 1372 | if (srf->scanout && |
610 | srf->num_sizes == 1 && | 1373 | srf->num_sizes == 1 && |
611 | srf->sizes[0].width == 64 && | 1374 | srf->sizes[0].width == 64 && |
@@ -617,7 +1380,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
617 | if (!srf->snooper.image) { | 1380 | if (!srf->snooper.image) { |
618 | DRM_ERROR("Failed to allocate cursor_image\n"); | 1381 | DRM_ERROR("Failed to allocate cursor_image\n"); |
619 | ret = -ENOMEM; | 1382 | ret = -ENOMEM; |
620 | goto out_err1; | 1383 | goto out_no_copy; |
621 | } | 1384 | } |
622 | } else { | 1385 | } else { |
623 | srf->snooper.image = NULL; | 1386 | srf->snooper.image = NULL; |
@@ -634,7 +1397,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
634 | 1397 | ||
635 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | 1398 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); |
636 | if (unlikely(ret != 0)) | 1399 | if (unlikely(ret != 0)) |
637 | return ret; | 1400 | goto out_unlock; |
638 | 1401 | ||
639 | tmp = vmw_resource_reference(&srf->res); | 1402 | tmp = vmw_resource_reference(&srf->res); |
640 | ret = ttm_base_object_init(tfile, &user_srf->base, | 1403 | ret = ttm_base_object_init(tfile, &user_srf->base, |
@@ -644,7 +1407,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
644 | if (unlikely(ret != 0)) { | 1407 | if (unlikely(ret != 0)) { |
645 | vmw_resource_unreference(&tmp); | 1408 | vmw_resource_unreference(&tmp); |
646 | vmw_resource_unreference(&res); | 1409 | vmw_resource_unreference(&res); |
647 | return ret; | 1410 | goto out_unlock; |
648 | } | 1411 | } |
649 | 1412 | ||
650 | rep->sid = user_srf->base.hash.key; | 1413 | rep->sid = user_srf->base.hash.key; |
@@ -652,11 +1415,19 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
652 | DRM_ERROR("Created bad Surface ID.\n"); | 1415 | DRM_ERROR("Created bad Surface ID.\n"); |
653 | 1416 | ||
654 | vmw_resource_unreference(&res); | 1417 | vmw_resource_unreference(&res); |
1418 | |||
1419 | ttm_read_unlock(&vmaster->lock); | ||
655 | return 0; | 1420 | return 0; |
656 | out_err1: | 1421 | out_no_copy: |
1422 | kfree(srf->offsets); | ||
1423 | out_no_offsets: | ||
657 | kfree(srf->sizes); | 1424 | kfree(srf->sizes); |
658 | out_err0: | 1425 | out_no_sizes: |
659 | kfree(user_srf); | 1426 | kfree(user_srf); |
1427 | out_no_user_srf: | ||
1428 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1429 | out_unlock: | ||
1430 | ttm_read_unlock(&vmaster->lock); | ||
660 | return ret; | 1431 | return ret; |
661 | } | 1432 | } |
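The offset loop in the define ioctl deserves a worked example. For each mip level, the row stride is the width in stride bits rounded up to whole bytes, (width * stride_bpp + 7) >> 3, and the level's byte size is stride * height * depth scaled by bpp / stride_bpp (bpp and s_bpp come from the vmw_sf_bpp[] table and differ for formats whose average bits per pixel is not the stride depth). A small user-space sketch, assuming a plain packed 32-bit format where bpp == stride_bpp:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t bpp = 32, stride_bpp = 32; /* assumed format */
		const struct { uint32_t w, h, d; } level[] = {
			{ 64, 64, 1 },	/* mip 0 */
			{ 32, 32, 1 },	/* mip 1 */
		};
		uint32_t bo_offset = 0;
		unsigned i;

		for (i = 0; i < 2; ++i) {
			/* bits per row, rounded up to whole bytes */
			uint32_t stride = (level[i].w * stride_bpp + 7) >> 3;

			printf("mip %u at offset %u\n", i, bo_offset);
			bo_offset += stride * level[i].h * level[i].d *
				     bpp / stride_bpp;
		}
		/* 64*64*4 = 16384, 32*32*4 = 4096 */
		printf("backup_size = %u\n", bo_offset);
		return 0;
	}

Mip 0 lands at offset 0 and mip 1 at 16384, so srf->backup_size comes out as 20480 bytes: the size of the backing buffer the surface contents are evicted to.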
662 | 1433 | ||
@@ -970,7 +1741,7 @@ static int vmw_stream_init(struct vmw_private *dev_priv, | |||
970 | int ret; | 1741 | int ret; |
971 | 1742 | ||
972 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, | 1743 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, |
973 | VMW_RES_STREAM, res_free); | 1744 | VMW_RES_STREAM, false, res_free, NULL); |
974 | 1745 | ||
975 | if (unlikely(ret != 0)) { | 1746 | if (unlikely(ret != 0)) { |
976 | if (res_free == NULL) | 1747 | if (res_free == NULL) |
@@ -1000,8 +1771,11 @@ static void vmw_user_stream_free(struct vmw_resource *res) | |||
1000 | { | 1771 | { |
1001 | struct vmw_user_stream *stream = | 1772 | struct vmw_user_stream *stream = |
1002 | container_of(res, struct vmw_user_stream, stream.res); | 1773 | container_of(res, struct vmw_user_stream, stream.res); |
1774 | struct vmw_private *dev_priv = res->dev_priv; | ||
1003 | 1775 | ||
1004 | kfree(stream); | 1776 | kfree(stream); |
1777 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
1778 | vmw_user_stream_size); | ||
1005 | } | 1779 | } |
1006 | 1780 | ||
1007 | /** | 1781 | /** |
@@ -1055,23 +1829,56 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | |||
1055 | struct drm_file *file_priv) | 1829 | struct drm_file *file_priv) |
1056 | { | 1830 | { |
1057 | struct vmw_private *dev_priv = vmw_priv(dev); | 1831 | struct vmw_private *dev_priv = vmw_priv(dev); |
1058 | struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL); | 1832 | struct vmw_user_stream *stream; |
1059 | struct vmw_resource *res; | 1833 | struct vmw_resource *res; |
1060 | struct vmw_resource *tmp; | 1834 | struct vmw_resource *tmp; |
1061 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | 1835 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; |
1062 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 1836 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
1837 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1063 | int ret; | 1838 | int ret; |
1064 | 1839 | ||
1065 | if (unlikely(stream == NULL)) | 1840 | /* |
1066 | return -ENOMEM; | 1841 | * Approximate idr memory usage with 128 bytes. It will be limited |
1842 | * by the maximum number of streams anyway. | ||
1843 | */ | ||
1844 | |||
1845 | if (unlikely(vmw_user_stream_size == 0)) | ||
1846 | vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128; | ||
1847 | |||
1848 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1849 | if (unlikely(ret != 0)) | ||
1850 | return ret; | ||
1851 | |||
1852 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
1853 | vmw_user_stream_size, | ||
1854 | false, true); | ||
1855 | if (unlikely(ret != 0)) { | ||
1856 | if (ret != -ERESTARTSYS) | ||
1857 | DRM_ERROR("Out of graphics memory for stream" | ||
1858 | " creation.\n"); | ||
1859 | goto out_unlock; | ||
1860 | } | ||
1861 | |||
1862 | |||
1863 | stream = kmalloc(sizeof(*stream), GFP_KERNEL); | ||
1864 | if (unlikely(stream == NULL)) { | ||
1865 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
1866 | vmw_user_stream_size); | ||
1867 | ret = -ENOMEM; | ||
1868 | goto out_unlock; | ||
1869 | } | ||
1067 | 1870 | ||
1068 | res = &stream->stream.res; | 1871 | res = &stream->stream.res; |
1069 | stream->base.shareable = false; | 1872 | stream->base.shareable = false; |
1070 | stream->base.tfile = NULL; | 1873 | stream->base.tfile = NULL; |
1071 | 1874 | ||
1875 | /* | ||
1876 | * From here on, the destructor takes over resource freeing. | ||
1877 | */ | ||
1878 | |||
1072 | ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); | 1879 | ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); |
1073 | if (unlikely(ret != 0)) | 1880 | if (unlikely(ret != 0)) |
1074 | return ret; | 1881 | goto out_unlock; |
1075 | 1882 | ||
1076 | tmp = vmw_resource_reference(res); | 1883 | tmp = vmw_resource_reference(res); |
1077 | ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, | 1884 | ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, |
@@ -1085,6 +1892,8 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | |||
1085 | arg->stream_id = res->id; | 1892 | arg->stream_id = res->id; |
1086 | out_err: | 1893 | out_err: |
1087 | vmw_resource_unreference(&res); | 1894 | vmw_resource_unreference(&res); |
1895 | out_unlock: | ||
1896 | ttm_read_unlock(&vmaster->lock); | ||
1088 | return ret; | 1897 | return ret; |
1089 | } | 1898 | } |
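The stream ioctl follows the same accounting discipline the surface ioctl adopted above: estimate the kernel-memory cost once (ttm_round_pot() of the object size plus roughly 128 bytes of idr overhead), charge it with ttm_mem_global_alloc() before any kmalloc(), and release it with ttm_mem_global_free() either on the error path or, once initialization succeeds, from the object's destructor. A minimal sketch of the pattern, with hypothetical my_obj and my_obj_init() stand-ins:

	#include <linux/slab.h>
	#include "ttm/ttm_memory.h"

	struct my_obj { unsigned long payload[16]; };	/* hypothetical */
	extern int my_obj_init(struct my_obj *obj);	/* takes over freeing */
	static size_t my_obj_size;

	static int create_accounted(struct ttm_mem_global *glob)
	{
		struct my_obj *obj;
		int ret;

		if (my_obj_size == 0)	/* one-time cost estimate */
			my_obj_size = ttm_round_pot(sizeof(*obj)) + 128;

		/* charge the estimated cost before allocating anything */
		ret = ttm_mem_global_alloc(glob, my_obj_size, false, true);
		if (ret != 0)
			return ret;

		obj = kmalloc(sizeof(*obj), GFP_KERNEL);
		if (obj == NULL) {
			ttm_mem_global_free(glob, my_obj_size);	/* undo */
			return -ENOMEM;
		}

		/*
		 * From here on the destructor owns the accounted memory
		 * and calls ttm_mem_global_free() on final release.
		 */
		return my_obj_init(obj);
	}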
1090 | 1899 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c new file mode 100644 index 000000000000..477b2a9eb3c2 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_kms.h" | ||
29 | |||
30 | |||
31 | #define vmw_crtc_to_sou(x) \ | ||
32 | container_of(x, struct vmw_screen_object_unit, base.crtc) | ||
33 | #define vmw_encoder_to_sou(x) \ | ||
34 | container_of(x, struct vmw_screen_object_unit, base.encoder) | ||
35 | #define vmw_connector_to_sou(x) \ | ||
36 | container_of(x, struct vmw_screen_object_unit, base.connector) | ||
37 | |||
38 | struct vmw_screen_object_display { | ||
39 | struct list_head active; | ||
40 | |||
41 | unsigned num_active; | ||
42 | unsigned last_num_active; | ||
43 | |||
44 | struct vmw_framebuffer *fb; | ||
45 | }; | ||
46 | |||
47 | /** | ||
48 | * Display unit using screen objects. | ||
49 | */ | ||
50 | struct vmw_screen_object_unit { | ||
51 | struct vmw_display_unit base; | ||
52 | |||
53 | unsigned long buffer_size; /**< Size of allocated buffer */ | ||
54 | struct vmw_dma_buffer *buffer; /**< Backing store buffer */ | ||
55 | |||
56 | bool defined; | ||
57 | |||
58 | struct list_head active; | ||
59 | }; | ||
60 | |||
61 | static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) | ||
62 | { | ||
63 | list_del_init(&sou->active); | ||
64 | vmw_display_unit_cleanup(&sou->base); | ||
65 | kfree(sou); | ||
66 | } | ||
67 | |||
68 | |||
69 | /* | ||
70 | * Screen Object Display Unit CRTC functions | ||
71 | */ | ||
72 | |||
73 | static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) | ||
74 | { | ||
75 | vmw_sou_destroy(vmw_crtc_to_sou(crtc)); | ||
76 | } | ||
77 | |||
78 | static int vmw_sou_del_active(struct vmw_private *vmw_priv, | ||
79 | struct vmw_screen_object_unit *sou) | ||
80 | { | ||
81 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; | ||
82 | if (list_empty(&sou->active)) | ||
83 | return 0; | ||
84 | |||
85 | /* Must init otherwise list_empty(&sou->active) will not work. */ | ||
86 | list_del_init(&sou->active); | ||
87 | if (--(ld->num_active) == 0) { | ||
88 | BUG_ON(!ld->fb); | ||
89 | if (ld->fb->unpin) | ||
90 | ld->fb->unpin(ld->fb); | ||
91 | ld->fb = NULL; | ||
92 | } | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int vmw_sou_add_active(struct vmw_private *vmw_priv, | ||
98 | struct vmw_screen_object_unit *sou, | ||
99 | struct vmw_framebuffer *vfb) | ||
100 | { | ||
101 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; | ||
102 | struct vmw_screen_object_unit *entry; | ||
103 | struct list_head *at; | ||
104 | |||
105 | BUG_ON(!ld->num_active && ld->fb); | ||
106 | if (vfb != ld->fb) { | ||
107 | if (ld->fb && ld->fb->unpin) | ||
108 | ld->fb->unpin(ld->fb); | ||
109 | if (vfb->pin) | ||
110 | vfb->pin(vfb); | ||
111 | ld->fb = vfb; | ||
112 | } | ||
113 | |||
114 | if (!list_empty(&sou->active)) | ||
115 | return 0; | ||
116 | |||
117 | at = &ld->active; | ||
118 | list_for_each_entry(entry, &ld->active, active) { | ||
119 | if (entry->base.unit > sou->base.unit) | ||
120 | break; | ||
121 | |||
122 | at = &entry->active; | ||
123 | } | ||
124 | |||
125 | list_add(&sou->active, at); | ||
126 | |||
127 | ld->num_active++; | ||
128 | |||
129 | return 0; | ||
130 | } | ||
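vmw_sou_add_active() keeps the active list sorted by unit number with a simple insertion walk: it remembers the last position whose unit is still smaller and inserts right after it. A standalone sketch of that idiom, with a hypothetical node type:

	#include <linux/list.h>

	/* Hypothetical node kept sorted by ascending ->unit. */
	struct node {
		struct list_head link;
		unsigned unit;
	};

	static void insert_sorted(struct list_head *head, struct node *n)
	{
		struct node *entry;
		struct list_head *at = head;	/* insert after this */

		list_for_each_entry(entry, head, link) {
			if (entry->unit > n->unit)
				break;		/* first larger element */
			at = &entry->link;
		}
		list_add(&n->link, at);	/* after the last smaller one */
	}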
131 | |||
132 | /** | ||
133 | * Send the fifo command to create a screen. | ||
134 | */ | ||
135 | static int vmw_sou_fifo_create(struct vmw_private *dev_priv, | ||
136 | struct vmw_screen_object_unit *sou, | ||
137 | uint32_t x, uint32_t y, | ||
138 | struct drm_display_mode *mode) | ||
139 | { | ||
140 | size_t fifo_size; | ||
141 | |||
142 | struct { | ||
143 | struct { | ||
144 | uint32_t cmdType; | ||
145 | } header; | ||
146 | SVGAScreenObject obj; | ||
147 | } *cmd; | ||
148 | |||
149 | BUG_ON(!sou->buffer); | ||
150 | |||
151 | fifo_size = sizeof(*cmd); | ||
152 | cmd = vmw_fifo_reserve(dev_priv, fifo_size); | ||
153 | /* The hardware has hung, nothing we can do about it here. */ | ||
154 | if (unlikely(cmd == NULL)) { | ||
155 | DRM_ERROR("Fifo reserve failed.\n"); | ||
156 | return -ENOMEM; | ||
157 | } | ||
158 | |||
159 | memset(cmd, 0, fifo_size); | ||
160 | cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN; | ||
161 | cmd->obj.structSize = sizeof(SVGAScreenObject); | ||
162 | cmd->obj.id = sou->base.unit; | ||
163 | cmd->obj.flags = SVGA_SCREEN_HAS_ROOT | | ||
164 | (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); | ||
165 | cmd->obj.size.width = mode->hdisplay; | ||
166 | cmd->obj.size.height = mode->vdisplay; | ||
167 | cmd->obj.root.x = x; | ||
168 | cmd->obj.root.y = y; | ||
169 | |||
170 | /* Ok to assume that buffer is pinned in vram */ | ||
171 | vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); | ||
172 | cmd->obj.backingStore.pitch = mode->hdisplay * 4; | ||
173 | |||
174 | vmw_fifo_commit(dev_priv, fifo_size); | ||
175 | |||
176 | sou->defined = true; | ||
177 | |||
178 | return 0; | ||
179 | } | ||
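vmw_sou_fifo_create() shows the driver's standard FIFO command sequence: reserve a contiguous slot with vmw_fifo_reserve(), fill in header and body, then commit exactly the reserved size with vmw_fifo_commit(); a NULL return from the reserve means the device FIFO is wedged and the command is abandoned. A minimal sketch with a hypothetical opcode and body (real ones live in svga_reg.h):

	#include "vmwgfx_drv.h"

	#define SVGA_CMD_EXAMPLE 0x1234	/* hypothetical opcode */

	struct example_cmd {
		uint32_t cmdType;	/* command header */
		uint32_t param;		/* command body */
	};

	static int vmw_send_example(struct vmw_private *dev_priv,
				    uint32_t param)
	{
		struct example_cmd *cmd;

		cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Fifo reserve failed.\n");
			return -ENOMEM;	/* FIFO wedged */
		}

		memset(cmd, 0, sizeof(*cmd));
		cmd->cmdType = SVGA_CMD_EXAMPLE;
		cmd->param = param;

		vmw_fifo_commit(dev_priv, sizeof(*cmd));
		return 0;
	}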
180 | |||
181 | /** | ||
182 | * Send the fifo command to destroy a screen. | ||
183 | */ | ||
184 | static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, | ||
185 | struct vmw_screen_object_unit *sou) | ||
186 | { | ||
187 | size_t fifo_size; | ||
188 | int ret; | ||
189 | |||
190 | struct { | ||
191 | struct { | ||
192 | uint32_t cmdType; | ||
193 | } header; | ||
194 | SVGAFifoCmdDestroyScreen body; | ||
195 | } *cmd; | ||
196 | |||
197 | /* no need to do anything */ | ||
198 | if (unlikely(!sou->defined)) | ||
199 | return 0; | ||
200 | |||
201 | fifo_size = sizeof(*cmd); | ||
202 | cmd = vmw_fifo_reserve(dev_priv, fifo_size); | ||
203 | /* the hardware has hung, nothing we can do about it here */ | ||
204 | if (unlikely(cmd == NULL)) { | ||
205 | DRM_ERROR("Fifo reserve failed.\n"); | ||
206 | return -ENOMEM; | ||
207 | } | ||
208 | |||
209 | memset(cmd, 0, fifo_size); | ||
210 | cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN; | ||
211 | cmd->body.screenId = sou->base.unit; | ||
212 | |||
213 | vmw_fifo_commit(dev_priv, fifo_size); | ||
214 | |||
215 | /* Force sync */ | ||
216 | ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ); | ||
217 | if (unlikely(ret != 0)) | ||
218 | DRM_ERROR("Failed to sync with HW.\n"); | ||
219 | else | ||
220 | sou->defined = false; | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | /** | ||
226 | * Free the backing store. | ||
227 | */ | ||
228 | static void vmw_sou_backing_free(struct vmw_private *dev_priv, | ||
229 | struct vmw_screen_object_unit *sou) | ||
230 | { | ||
231 | struct ttm_buffer_object *bo; | ||
232 | |||
233 | if (unlikely(sou->buffer == NULL)) | ||
234 | return; | ||
235 | |||
236 | bo = &sou->buffer->base; | ||
237 | ttm_bo_unref(&bo); | ||
238 | sou->buffer = NULL; | ||
239 | sou->buffer_size = 0; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * Allocate the backing store for the buffer. | ||
244 | */ | ||
245 | static int vmw_sou_backing_alloc(struct vmw_private *dev_priv, | ||
246 | struct vmw_screen_object_unit *sou, | ||
247 | unsigned long size) | ||
248 | { | ||
249 | int ret; | ||
250 | |||
251 | if (sou->buffer_size == size) | ||
252 | return 0; | ||
253 | |||
254 | if (sou->buffer) | ||
255 | vmw_sou_backing_free(dev_priv, sou); | ||
256 | |||
257 | sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL); | ||
258 | if (unlikely(sou->buffer == NULL)) | ||
259 | return -ENOMEM; | ||
260 | |||
261 | /* After we have allocated the backing store, we might not be able | ||
262 | * to resume the overlays; this is preferred to failing to allocate. | ||
263 | */ | ||
264 | vmw_overlay_pause_all(dev_priv); | ||
265 | ret = vmw_dmabuf_init(dev_priv, sou->buffer, size, | ||
266 | &vmw_vram_ne_placement, | ||
267 | false, &vmw_dmabuf_bo_free); | ||
268 | vmw_overlay_resume_all(dev_priv); | ||
269 | |||
270 | if (unlikely(ret != 0)) | ||
271 | sou->buffer = NULL; /* vmw_dmabuf_init frees on error */ | ||
272 | else | ||
273 | sou->buffer_size = size; | ||
274 | |||
275 | return ret; | ||
276 | } | ||
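The pause/resume bracket above exists because the buffer is created with vmw_vram_ne_placement, i.e. pinned in VRAM with eviction disabled: pausing the overlays unpins their VRAM so the new placement can succeed, at the acceptable cost that the overlays might not resume. A hedged sketch of the bracket, with a hypothetical pinned-VRAM allocator in place of vmw_dmabuf_init():

	#include "vmwgfx_drv.h"

	/* Hypothetical allocator pinning a buffer in VRAM, as
	 * vmw_dmabuf_init() does above with the no-evict placement. */
	extern int alloc_pinned_vram(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf, size_t size);

	static int alloc_with_overlays_paused(struct vmw_private *dev_priv,
					      struct vmw_dma_buffer *buf,
					      size_t size)
	{
		int ret;

		vmw_overlay_pause_all(dev_priv);   /* unpin overlay VRAM */
		ret = alloc_pinned_vram(dev_priv, buf, size);
		vmw_overlay_resume_all(dev_priv);  /* best effort */

		return ret;
	}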
277 | |||
278 | static int vmw_sou_crtc_set_config(struct drm_mode_set *set) | ||
279 | { | ||
280 | struct vmw_private *dev_priv; | ||
281 | struct vmw_screen_object_unit *sou; | ||
282 | struct drm_connector *connector; | ||
283 | struct drm_display_mode *mode; | ||
284 | struct drm_encoder *encoder; | ||
285 | struct vmw_framebuffer *vfb; | ||
286 | struct drm_framebuffer *fb; | ||
287 | struct drm_crtc *crtc; | ||
288 | int ret = 0; | ||
289 | |||
290 | if (!set) | ||
291 | return -EINVAL; | ||
292 | |||
293 | if (!set->crtc) | ||
294 | return -EINVAL; | ||
295 | |||
296 | /* get the sou */ | ||
297 | crtc = set->crtc; | ||
298 | sou = vmw_crtc_to_sou(crtc); | ||
299 | vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL; | ||
300 | dev_priv = vmw_priv(crtc->dev); | ||
301 | |||
302 | if (set->num_connectors > 1) { | ||
303 | DRM_ERROR("too many connectors\n"); | ||
304 | return -EINVAL; | ||
305 | } | ||
306 | |||
307 | if (set->num_connectors == 1 && | ||
308 | set->connectors[0] != &sou->base.connector) { | ||
309 | DRM_ERROR("connector doesn't match %p %p\n", | ||
310 | set->connectors[0], &sou->base.connector); | ||
311 | return -EINVAL; | ||
312 | } | ||
313 | |||
314 | /* sou only supports one fb active at a time */ | ||
315 | if (dev_priv->sou_priv->fb && vfb && | ||
316 | !(dev_priv->sou_priv->num_active == 1 && | ||
317 | !list_empty(&sou->active)) && | ||
318 | dev_priv->sou_priv->fb != vfb) { | ||
319 | DRM_ERROR("Multiple framebuffers not supported\n"); | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | |||
323 | /* since they always map one to one these are safe */ | ||
324 | connector = &sou->base.connector; | ||
325 | encoder = &sou->base.encoder; | ||
326 | |||
327 | /* should we turn the crtc off */ | ||
328 | if (set->num_connectors == 0 || !set->mode || !set->fb) { | ||
329 | ret = vmw_sou_fifo_destroy(dev_priv, sou); | ||
330 | /* the hardware has hung, don't do anything more */ | ||
331 | if (unlikely(ret != 0)) | ||
332 | return ret; | ||
333 | |||
334 | connector->encoder = NULL; | ||
335 | encoder->crtc = NULL; | ||
336 | crtc->fb = NULL; | ||
337 | crtc->x = 0; | ||
338 | crtc->y = 0; | ||
339 | |||
340 | vmw_sou_del_active(dev_priv, sou); | ||
341 | |||
342 | vmw_sou_backing_free(dev_priv, sou); | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | |||
348 | /* we now know we want to set a mode */ | ||
349 | mode = set->mode; | ||
350 | fb = set->fb; | ||
351 | |||
352 | if (set->x + mode->hdisplay > fb->width || | ||
353 | set->y + mode->vdisplay > fb->height) { | ||
354 | DRM_ERROR("set outside of framebuffer\n"); | ||
355 | return -EINVAL; | ||
356 | } | ||
357 | |||
358 | vmw_fb_off(dev_priv); | ||
359 | |||
360 | if (mode->hdisplay != crtc->mode.hdisplay || | ||
361 | mode->vdisplay != crtc->mode.vdisplay) { | ||
362 | /* no need to check if depth is different, because backing | ||
363 | * store depth is forced to 4 by the device. | ||
364 | */ | ||
365 | |||
366 | ret = vmw_sou_fifo_destroy(dev_priv, sou); | ||
367 | /* the hardware has hung, don't do anything more */ | ||
368 | if (unlikely(ret != 0)) | ||
369 | return ret; | ||
370 | |||
371 | vmw_sou_backing_free(dev_priv, sou); | ||
372 | } | ||
373 | |||
374 | if (!sou->buffer) { | ||
375 | /* forced to depth 4 by the device */ | ||
376 | size_t size = mode->hdisplay * mode->vdisplay * 4; | ||
377 | ret = vmw_sou_backing_alloc(dev_priv, sou, size); | ||
378 | if (unlikely(ret != 0)) | ||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode); | ||
383 | if (unlikely(ret != 0)) { | ||
384 | /* | ||
385 | * We are in a bit of a situation here: the hardware has | ||
386 | * hung, and we may or may not have a buffer hanging off | ||
387 | * the screen object. The best thing to do is to do | ||
388 | * nothing if we were already defined; if not, just turn | ||
389 | * the crtc off. Not what userspace wants, but it will have to cope. | ||
390 | */ | ||
391 | if (sou->defined) | ||
392 | return ret; | ||
393 | |||
394 | connector->encoder = NULL; | ||
395 | encoder->crtc = NULL; | ||
396 | crtc->fb = NULL; | ||
397 | crtc->x = 0; | ||
398 | crtc->y = 0; | ||
399 | |||
400 | return ret; | ||
401 | } | ||
402 | |||
403 | vmw_sou_add_active(dev_priv, sou, vfb); | ||
404 | |||
405 | connector->encoder = encoder; | ||
406 | encoder->crtc = crtc; | ||
407 | crtc->mode = *mode; | ||
408 | crtc->fb = fb; | ||
409 | crtc->x = set->x; | ||
410 | crtc->y = set->y; | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { | ||
416 | .save = vmw_du_crtc_save, | ||
417 | .restore = vmw_du_crtc_restore, | ||
418 | .cursor_set = vmw_du_crtc_cursor_set, | ||
419 | .cursor_move = vmw_du_crtc_cursor_move, | ||
420 | .gamma_set = vmw_du_crtc_gamma_set, | ||
421 | .destroy = vmw_sou_crtc_destroy, | ||
422 | .set_config = vmw_sou_crtc_set_config, | ||
423 | }; | ||
424 | |||
425 | /* | ||
426 | * Screen Object Display Unit encoder functions | ||
427 | */ | ||
428 | |||
429 | static void vmw_sou_encoder_destroy(struct drm_encoder *encoder) | ||
430 | { | ||
431 | vmw_sou_destroy(vmw_encoder_to_sou(encoder)); | ||
432 | } | ||
433 | |||
434 | static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = { | ||
435 | .destroy = vmw_sou_encoder_destroy, | ||
436 | }; | ||
437 | |||
438 | /* | ||
439 | * Screen Object Display Unit connector functions | ||
440 | */ | ||
441 | |||
442 | static void vmw_sou_connector_destroy(struct drm_connector *connector) | ||
443 | { | ||
444 | vmw_sou_destroy(vmw_connector_to_sou(connector)); | ||
445 | } | ||
446 | |||
447 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { | ||
448 | .dpms = vmw_du_connector_dpms, | ||
449 | .save = vmw_du_connector_save, | ||
450 | .restore = vmw_du_connector_restore, | ||
451 | .detect = vmw_du_connector_detect, | ||
452 | .fill_modes = vmw_du_connector_fill_modes, | ||
453 | .set_property = vmw_du_connector_set_property, | ||
454 | .destroy = vmw_sou_connector_destroy, | ||
455 | }; | ||
456 | |||
457 | static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) | ||
458 | { | ||
459 | struct vmw_screen_object_unit *sou; | ||
460 | struct drm_device *dev = dev_priv->dev; | ||
461 | struct drm_connector *connector; | ||
462 | struct drm_encoder *encoder; | ||
463 | struct drm_crtc *crtc; | ||
464 | |||
465 | sou = kzalloc(sizeof(*sou), GFP_KERNEL); | ||
466 | if (!sou) | ||
467 | return -ENOMEM; | ||
468 | |||
469 | sou->base.unit = unit; | ||
470 | crtc = &sou->base.crtc; | ||
471 | encoder = &sou->base.encoder; | ||
472 | connector = &sou->base.connector; | ||
473 | |||
474 | INIT_LIST_HEAD(&sou->active); | ||
475 | |||
476 | sou->base.pref_active = (unit == 0); | ||
477 | sou->base.pref_width = 800; | ||
478 | sou->base.pref_height = 600; | ||
479 | sou->base.pref_mode = NULL; | ||
480 | |||
481 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | ||
482 | DRM_MODE_CONNECTOR_LVDS); | ||
483 | connector->status = vmw_du_connector_detect(connector, true); | ||
484 | |||
485 | drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, | ||
486 | DRM_MODE_ENCODER_LVDS); | ||
487 | drm_mode_connector_attach_encoder(connector, encoder); | ||
488 | encoder->possible_crtcs = (1 << unit); | ||
489 | encoder->possible_clones = 0; | ||
490 | |||
491 | drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); | ||
492 | |||
493 | drm_mode_crtc_set_gamma_size(crtc, 256); | ||
494 | |||
495 | drm_connector_attach_property(connector, | ||
496 | dev->mode_config.dirty_info_property, | ||
497 | 1); | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) | ||
503 | { | ||
504 | struct drm_device *dev = dev_priv->dev; | ||
505 | int i, ret; | ||
506 | |||
507 | if (dev_priv->sou_priv) { | ||
508 | DRM_INFO("sou system already on\n"); | ||
509 | return -EINVAL; | ||
510 | } | ||
511 | |||
512 | if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) { | ||
513 | DRM_INFO("Not using screen objects," | ||
514 | " missing cap SCREEN_OBJECT_2\n"); | ||
515 | return -ENOSYS; | ||
516 | } | ||
517 | |||
518 | ret = -ENOMEM; | ||
519 | dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL); | ||
520 | if (unlikely(!dev_priv->sou_priv)) | ||
521 | goto err_no_mem; | ||
522 | |||
523 | INIT_LIST_HEAD(&dev_priv->sou_priv->active); | ||
524 | dev_priv->sou_priv->num_active = 0; | ||
525 | dev_priv->sou_priv->last_num_active = 0; | ||
526 | dev_priv->sou_priv->fb = NULL; | ||
527 | |||
528 | ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); | ||
529 | if (unlikely(ret != 0)) | ||
530 | goto err_free; | ||
531 | |||
532 | ret = drm_mode_create_dirty_info_property(dev); | ||
533 | if (unlikely(ret != 0)) | ||
534 | goto err_vblank_cleanup; | ||
535 | |||
536 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) | ||
537 | vmw_sou_init(dev_priv, i); | ||
538 | |||
539 | DRM_INFO("Screen objects system initialized\n"); | ||
540 | |||
541 | return 0; | ||
542 | |||
543 | err_vblank_cleanup: | ||
544 | drm_vblank_cleanup(dev); | ||
545 | err_free: | ||
546 | kfree(dev_priv->sou_priv); | ||
547 | dev_priv->sou_priv = NULL; | ||
548 | err_no_mem: | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) | ||
553 | { | ||
554 | struct drm_device *dev = dev_priv->dev; | ||
555 | |||
556 | if (!dev_priv->sou_priv) | ||
557 | return -ENOSYS; | ||
558 | |||
559 | drm_vblank_cleanup(dev); | ||
560 | |||
561 | if (!list_empty(&dev_priv->sou_priv->active)) | ||
562 | DRM_ERROR("Still have active outputs when unloading driver\n"); | ||
563 | |||
564 | kfree(dev_priv->sou_priv); | ||
565 | |||
566 | return 0; | ||
567 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 1e8eedd901e0..d3c11f5184f3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | |||
@@ -34,9 +34,8 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) | |||
34 | struct vmw_private *dev_priv; | 34 | struct vmw_private *dev_priv; |
35 | 35 | ||
36 | if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) { | 36 | if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) { |
37 | if (vmw_fifo_mmap(filp, vma) == 0) | 37 | DRM_ERROR("Illegal attempt to mmap old fifo space.\n"); |
38 | return 0; | 38 | return -EINVAL; |
39 | return drm_mmap(filp, vma); | ||
40 | } | 39 | } |
41 | 40 | ||
42 | file_priv = filp->private_data; | 41 | file_priv = filp->private_data; |