author    Dave Airlie <airlied@redhat.com>  2009-03-28 20:22:18 -0400
committer Dave Airlie <airlied@redhat.com>  2009-03-28 20:22:18 -0400
commit    90f959bcb386da2c71613dcefc6a285e054a539e (patch)
tree      ee3e9dd4111d4aad12e579cb0c2c159114dff263 /drivers/gpu
parent    41f13fe81dd1b08723ab9f3fc3c7f29cfa81f1a5 (diff)
parent    07d43ba98621f08e252a48c96b258b4d572b0257 (diff)
drm: merge Linux master into HEAD
Conflicts:
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_proc.c
drivers/gpu/drm/i915/i915_gem_debugfs.c
Diffstat (limited to 'drivers/gpu')
 -rw-r--r--  drivers/gpu/drm/drm_fops.c              |   6
 -rw-r--r--  drivers/gpu/drm/drm_info.c              |   4
 -rw-r--r--  drivers/gpu/drm/drm_sysfs.c             |   4
 -rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         | 116
 -rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  15
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 894
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c |   2
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |  31
 -rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |  22
 -rw-r--r--  drivers/gpu/drm/i915/intel_bios.h       |  12
 -rw-r--r--  drivers/gpu/drm/i915/intel_crt.c        |  66
 -rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 406
 -rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |   2
 -rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         | 148
14 files changed, 1375 insertions(+), 353 deletions(-)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f52663ebe016..e13cb62bbaee 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -337,14 +337,10 @@ int drm_fasync(int fd, struct file *filp, int on)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
-	int retcode;
 
 	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
 		  (long)old_encode_dev(priv->minor->device));
-	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
-	if (retcode < 0)
-		return retcode;
-	return 0;
+	return fasync_helper(fd, filp, on, &dev->buf_async);
 }
 EXPORT_SYMBOL(drm_fasync);
 
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 60a1b6cb376a..f0f6c6b93f3a 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -286,9 +286,9 @@ int drm_vma_info(struct seq_file *m, void *data)
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
+	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, virt_to_phys(high_memory));
+		   high_memory, (u64)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index f7510a8f0eb9..5de573a981cb 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -349,8 +349,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 	DRM_DEBUG("adding \"%s\" to sysfs\n",
 		  drm_get_connector_name(connector));
 
-	snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
-		 dev->primary->index, drm_get_connector_name(connector));
+	dev_set_name(&connector->kdev, "card%d-%s",
+		     dev->primary->index, drm_get_connector_name(connector));
 	ret = device_register(&connector->kdev);
 
 	if (ret) {
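The drm_sysfs hunk swaps a bounded snprintf() into the fixed-size bus_id[] array for dev_set_name(), which lets the driver core own the device name instead of a fixed-length buffer (bus_id/BUS_ID_SIZE were being removed from struct device around this time). A minimal sketch of the same migration, with hypothetical index/name variables standing in for the connector details:

	/* Before: format into the embedded fixed-size buffer. */
	snprintf(kdev->bus_id, BUS_ID_SIZE, "card%d-%s", index, name);

	/* After: the driver core allocates and manages the name. */
	dev_set_name(kdev, "card%d-%s", index, name);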
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4d9f5c6818ca..85549f615b1f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
 	u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 	if (ring->space >= n)
 		return 0;
 
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+	if (dev->primary->master) {
+		struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+		if (master_priv->sarea_priv)
+			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+	}
+
 
 	if (ring->head != last_head)
 		i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
 	return ret;
 }
 
-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 	for (i = 0; i < dwords;) {
 		int cmd, sz;
 
-		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
-			return -EINVAL;
+		cmd = buffer[i];
 
 		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
 			return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 		OUT_RING(cmd);
 
 		while (++i, --sz) {
-			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
-							 sizeof(cmd))) {
-				return -EINVAL;
-			}
-			OUT_RING(cmd);
+			OUT_RING(buffer[i]);
 		}
 	}
 
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 
 int
 i915_emit_box(struct drm_device *dev,
-	      struct drm_clip_rect __user *boxes,
+	      struct drm_clip_rect *boxes,
 	      int i, int DR1, int DR4)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_clip_rect box;
+	struct drm_clip_rect box = boxes[i];
 	RING_LOCALS;
 
-	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
-		return -EFAULT;
-	}
-
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
 			  box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
-				   drm_i915_cmdbuffer_t * cmd)
+				   drm_i915_cmdbuffer_t *cmd,
+				   struct drm_clip_rect *cliprects,
+				   void *cmdbuf)
 {
 	int nbox = cmd->num_cliprects;
 	int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			ret = i915_emit_box(dev, cmd->cliprects, i,
+			ret = i915_emit_box(dev, cliprects, i,
 					    cmd->DR1, cmd->DR4);
 			if (ret)
 				return ret;
 		}
 
-		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
 		if (ret)
 			return ret;
 	}
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 }
 
 static int i915_dispatch_batchbuffer(struct drm_device * dev,
-				     drm_i915_batchbuffer_t * batch)
+				     drm_i915_batchbuffer_t * batch,
+				     struct drm_clip_rect *cliprects)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_clip_rect __user *boxes = batch->cliprects;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
 	RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, boxes, i,
+			int ret = i915_emit_box(dev, cliprects, i,
 						batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	    master_priv->sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
 	int ret;
+	struct drm_clip_rect *cliprects = NULL;
 
 	if (!dev_priv->allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-							batch->num_cliprects *
-							sizeof(struct drm_clip_rect)))
-		return -EFAULT;
+	if (batch->num_cliprects < 0)
+		return -EINVAL;
+
+	if (batch->num_cliprects) {
+		cliprects = drm_calloc(batch->num_cliprects,
+				       sizeof(struct drm_clip_rect),
+				       DRM_MEM_DRIVER);
+		if (cliprects == NULL)
+			return -ENOMEM;
+
+		ret = copy_from_user(cliprects, batch->cliprects,
+				     batch->num_cliprects *
+				     sizeof(struct drm_clip_rect));
+		if (ret != 0)
+			goto fail_free;
+	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_batchbuffer(dev, batch);
+	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+	drm_free(cliprects,
+		 batch->num_cliprects * sizeof(struct drm_clip_rect),
+		 DRM_MEM_DRIVER);
+
 	return ret;
 }
 
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    master_priv->sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
+	struct drm_clip_rect *cliprects = NULL;
+	void *batch_data;
 	int ret;
 
 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	if (cmdbuf->num_cliprects &&
-	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
-				cmdbuf->num_cliprects *
-				sizeof(struct drm_clip_rect))) {
-		DRM_ERROR("Fault accessing cliprects\n");
-		return -EFAULT;
+	if (cmdbuf->num_cliprects < 0)
+		return -EINVAL;
+
+	batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+	if (batch_data == NULL)
+		return -ENOMEM;
+
+	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+	if (ret != 0)
+		goto fail_batch_free;
+
+	if (cmdbuf->num_cliprects) {
+		cliprects = drm_calloc(cmdbuf->num_cliprects,
+				       sizeof(struct drm_clip_rect),
+				       DRM_MEM_DRIVER);
+		if (cliprects == NULL)
+			goto fail_batch_free;
+
+		ret = copy_from_user(cliprects, cmdbuf->cliprects,
+				     cmdbuf->num_cliprects *
+				     sizeof(struct drm_clip_rect));
+		if (ret != 0)
+			goto fail_clip_free;
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-		return ret;
+		goto fail_batch_free;
 	}
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-	return 0;
+
+fail_batch_free:
+	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+fail_clip_free:
+	drm_free(cliprects,
+		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
+		 DRM_MEM_DRIVER);
+
+	return ret;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
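Both ioctls above now follow the same shape: validate the user-supplied count, snapshot the user-space arrays into kernel memory with copy_from_user() before dispatch, and unwind through goto labels. A condensed sketch of that copy-in pattern, using the era's drm_calloc()/drm_free() helpers as the hunks do (the function name and parameters here are illustrative, not part of the driver):

	static int copy_in_cliprects(struct drm_clip_rect **out,
				     const struct drm_clip_rect __user *uptr,
				     int n)
	{
		struct drm_clip_rect *rects;

		if (n < 0)	/* reject bogus counts before using them as sizes */
			return -EINVAL;
		*out = NULL;
		if (n == 0)
			return 0;

		rects = drm_calloc(n, sizeof(*rects), DRM_MEM_DRIVER);
		if (rects == NULL)
			return -ENOMEM;

		if (copy_from_user(rects, uptr, n * sizeof(*rects))) {
			drm_free(rects, n * sizeof(*rects), DRM_MEM_DRIVER);
			return -EFAULT;
		}

		*out = rects;	/* caller drm_free()s once dispatch is done */
		return 0;
	}

With the rectangles copied in up front, i915_emit_box() can read them directly rather than calling DRM_COPY_FROM_USER_UNCHECKED inside the ring-emission path.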
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1bc45a78ffcd..c1685d0c704f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
-	struct page **page_list;
+	struct page **pages;
+	int pages_refcount;
 
 	/**
 	 * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 extern int i915_emit_box(struct drm_device *dev,
-			 struct drm_clip_rect __user *boxes,
+			 struct drm_clip_rect *boxes,
 			 int i, int DR1, int DR4);
 
 /* i915_irq.c */
@@ -786,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		     (dev)->pci_device == 0x2E22 || \
 		     IS_GM45(dev))
 
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
 			(dev)->pci_device == 0x29B2 ||	\
-			(dev)->pci_device == 0x29D2)
+			(dev)->pci_device == 0x29D2 ||	\
+			(IS_IGD(dev)))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
 		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
+			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+			IS_IGD(dev))
 
 #define I915_NEED_GFX_HWS(dev)	(IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8d5ec5fd5252..e5d2bdf2cc9b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static inline int
+fast_shmem_read(struct page **pages,
+		loff_t page_base, int page_offset,
+		char __user *data,
+		int length)
+{
+	char __iomem *vaddr;
+	int ret;
+
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	if (vaddr == NULL)
+		return -ENOMEM;
+	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	kunmap_atomic(vaddr, KM_USER0);
+
+	return ret;
+}
+
+static inline int
+slow_shmem_copy(struct page *dst_page,
+		int dst_offset,
+		struct page *src_page,
+		int src_offset,
+		int length)
+{
+	char *dst_vaddr, *src_vaddr;
+
+	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+	if (dst_vaddr == NULL)
+		return -ENOMEM;
+
+	src_vaddr = kmap_atomic(src_page, KM_USER1);
+	if (src_vaddr == NULL) {
+		kunmap_atomic(dst_vaddr, KM_USER0);
+		return -ENOMEM;
+	}
+
+	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+	kunmap_atomic(src_vaddr, KM_USER1);
+	kunmap_atomic(dst_vaddr, KM_USER0);
+
+	return 0;
+}
+
+/**
+ * This is the fast shmem pread path, which attempts to copy_to_user directly
+ * from the backing pages of the object to the user's address space.  On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+			  struct drm_i915_gem_pread *args,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length;
+	int ret;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
+	offset = args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = (offset & ~(PAGE_SIZE-1));
+		page_offset = offset & (PAGE_SIZE-1);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		ret = fast_shmem_read(obj_priv->pages,
+				      page_base, page_offset,
+				      user_data, page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which allocates temporary storage
+ * in kernel space to copy_to_user into outside of the struct_mutex, so we
+ * can copy out of the object's backing pages while holding the struct mutex
+ * and not take page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+			  struct drm_i915_gem_pread *args,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct mm_struct *mm = current->mm;
+	struct page **user_pages;
+	ssize_t remain;
+	loff_t offset, pinned_pages, i;
+	loff_t first_data_page, last_data_page, num_pages;
+	int shmem_page_index, shmem_page_offset;
+	int data_page_index, data_page_offset;
+	int page_length;
+	int ret;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, yet we want to hold it while
+	 * dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (user_pages == NULL)
+		return -ENOMEM;
+
+	down_read(&mm->mmap_sem);
+	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+				      num_pages, 0, 0, user_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (pinned_pages < num_pages) {
+		ret = -EFAULT;
+		goto fail_put_user_pages;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
+	offset = args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * shmem_page_index = page number within shmem file
+		 * shmem_page_offset = offset within page in shmem file
+		 * data_page_index = page number in get_user_pages return
+		 * data_page_offset = offset within data_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_index = offset / PAGE_SIZE;
+		shmem_page_offset = offset & ~PAGE_MASK;
+		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		data_page_offset = data_ptr & ~PAGE_MASK;
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+		if ((data_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - data_page_offset;
+
+		ret = slow_shmem_copy(user_pages[data_page_index],
+				      data_page_offset,
+				      obj_priv->pages[shmem_page_index],
+				      shmem_page_offset,
+				      page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		data_ptr += page_length;
+		offset += page_length;
+	}
+
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+	for (i = 0; i < pinned_pages; i++) {
+		SetPageDirty(user_pages[i]);
+		page_cache_release(user_pages[i]);
+	}
+	kfree(user_pages);
+
+	return ret;
+}
+
 /**
  * Reads data from the object referenced by handle.
  *
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pread *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	ssize_t read;
-	loff_t offset;
 	int ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->struct_mutex);
-
-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-							args->size);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	offset = args->offset;
-
-	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-			args->size, &offset);
-	if (read != args->size) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		if (read < 0)
-			return read;
-		else
-			return -EINVAL;
-	}
+	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+	if (ret != 0)
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 
 	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /* This is the fast write path which cannot handle
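The rewritten pread ioctl tries an atomic fast path first and only falls back when that path cannot finish without faulting; the pwrite paths below use the same try-fast-then-fall-back shape. Sketched in isolation, assuming the two helpers introduced above:

	/* The fast path copies via kmap_atomic()/__copy_to_user_inatomic()
	 * while struct_mutex is held, so it must not fault; a nonzero
	 * return means "couldn't complete atomically", not a hard error. */
	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
	if (ret != 0)
		/* The slow path pins the user pages with get_user_pages()
		 * up front, so its copies under the mutex cannot fault. */
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);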
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
  */
 
 static inline int
-slow_user_write(struct io_mapping *mapping,
-		loff_t page_base, int page_offset,
-		char __user *user_data,
-		int length)
+slow_kernel_write(struct io_mapping *mapping,
+		  loff_t gtt_base, int gtt_offset,
+		  struct page *user_page, int user_offset,
+		  int length)
 {
-	char __iomem *vaddr;
+	char *src_vaddr, *dst_vaddr;
 	unsigned long unwritten;
 
-	vaddr = io_mapping_map_wc(mapping, page_base);
-	if (vaddr == NULL)
-		return -EFAULT;
-	unwritten = __copy_from_user(vaddr + page_offset,
-				     user_data, length);
-	io_mapping_unmap(vaddr);
+	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+	src_vaddr = kmap_atomic(user_page, KM_USER1);
+	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+						      src_vaddr + user_offset,
+						      length);
+	kunmap_atomic(src_vaddr, KM_USER1);
+	io_mapping_unmap_atomic(dst_vaddr);
 	if (unwritten)
 		return -EFAULT;
 	return 0;
 }
 
+static inline int
+fast_shmem_write(struct page **pages,
+		 loff_t page_base, int page_offset,
+		 char __user *data,
+		 int length)
+{
+	char __iomem *vaddr;
+
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	if (vaddr == NULL)
+		return -ENOMEM;
+	__copy_from_user_inatomic(vaddr + page_offset, data, length);
+	kunmap_atomic(vaddr, KM_USER0);
+
+	return 0;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
 static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-		    struct drm_i915_gem_pwrite *args,
-		    struct drm_file *file_priv)
+i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	obj_priv = obj->driver_private;
 	offset = obj_priv->gtt_offset + args->offset;
-	obj_priv->dirty = 1;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 				       page_offset, user_data, page_length);
 
 		/* If we get a fault while copying data, then (presumably) our
-		 * source page isn't available.  In this case, use the
-		 * non-atomic function
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
 		 */
-		if (ret) {
-			ret = slow_user_write (dev_priv->mm.gtt_mapping,
-					       page_base, page_offset,
-					       user_data, page_length);
-			if (ret)
-				goto fail;
-		}
+		if (ret)
+			goto fail;
 
 		remain -= page_length;
 		user_data += page_length;
@@ -315,39 +527,284 @@ fail:
 	return ret;
 }
 
+/**
+ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+ */
 static int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-		      struct drm_i915_gem_pwrite *args,
-		      struct drm_file *file_priv)
+i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file_priv)
 {
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	ssize_t remain;
+	loff_t gtt_page_base, offset;
+	loff_t first_data_page, last_data_page, num_pages;
+	loff_t pinned_pages, i;
+	struct page **user_pages;
+	struct mm_struct *mm = current->mm;
+	int gtt_page_offset, data_page_offset, data_page_index, page_length;
 	int ret;
-	loff_t offset;
-	ssize_t written;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, and all of the pwrite implementations
+	 * want to hold it while dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (user_pages == NULL)
+		return -ENOMEM;
+
+	down_read(&mm->mmap_sem);
+	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+				      num_pages, 0, 0, user_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (pinned_pages < num_pages) {
+		ret = -EFAULT;
+		goto out_unpin_pages;
+	}
 
 	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(obj, 0);
+	if (ret)
+		goto out_unlock;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	if (ret)
+		goto out_unpin_object;
+
+	obj_priv = obj->driver_private;
+	offset = obj_priv->gtt_offset + args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * gtt_page_base = page offset within aperture
+		 * gtt_page_offset = offset within page in aperture
+		 * data_page_index = page number in get_user_pages return
+		 * data_page_offset = offset within data_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		gtt_page_base = offset & PAGE_MASK;
+		gtt_page_offset = offset & ~PAGE_MASK;
+		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		data_page_offset = data_ptr & ~PAGE_MASK;
+
+		page_length = remain;
+		if ((gtt_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - gtt_page_offset;
+		if ((data_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - data_page_offset;
+
+		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+					gtt_page_base, gtt_page_offset,
+					user_pages[data_page_index],
+					data_page_offset,
+					page_length);
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (ret)
+			goto out_unpin_object;
+
+		remain -= page_length;
+		offset += page_length;
+		data_ptr += page_length;
+	}
+
+out_unpin_object:
+	i915_gem_object_unpin(obj);
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+out_unpin_pages:
+	for (i = 0; i < pinned_pages; i++)
+		page_cache_release(user_pages[i]);
+	kfree(user_pages);
+
+	return ret;
+}
+
+/**
+ * This is the fast shmem pwrite path, which attempts to directly
+ * copy_from_user into the kmapped pages backing the object.
+ */
+static int
+i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+			   struct drm_i915_gem_pwrite *args,
+			   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length;
+	int ret;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
+	offset = args->offset;
+	obj_priv->dirty = 1;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = (offset & ~(PAGE_SIZE-1));
+		page_offset = offset & (PAGE_SIZE-1);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		ret = fast_shmem_write(obj_priv->pages,
+				       page_base, page_offset,
+				       user_data, page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This avoids taking mmap_sem for faulting on the user's address while the
+ * struct_mutex is held.
+ */
+static int
+i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+			   struct drm_i915_gem_pwrite *args,
+			   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct mm_struct *mm = current->mm;
+	struct page **user_pages;
+	ssize_t remain;
+	loff_t offset, pinned_pages, i;
+	loff_t first_data_page, last_data_page, num_pages;
+	int shmem_page_index, shmem_page_offset;
+	int data_page_index, data_page_offset;
+	int page_length;
+	int ret;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, and all of the pwrite implementations
+	 * want to hold it while dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (user_pages == NULL)
+		return -ENOMEM;
+
+	down_read(&mm->mmap_sem);
+	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+				      num_pages, 0, 0, user_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (pinned_pages < num_pages) {
+		ret = -EFAULT;
+		goto fail_put_user_pages;
 	}
 
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
 	offset = args->offset;
+	obj_priv->dirty = 1;
 
-	written = vfs_write(obj->filp,
-			    (char __user *)(uintptr_t) args->data_ptr,
-			    args->size, &offset);
-	if (written != args->size) {
-		mutex_unlock(&dev->struct_mutex);
-		if (written < 0)
-			return written;
-		else
-			return -EINVAL;
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * shmem_page_index = page number within shmem file
+		 * shmem_page_offset = offset within page in shmem file
+		 * data_page_index = page number in get_user_pages return
+		 * data_page_offset = offset within data_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_index = offset / PAGE_SIZE;
+		shmem_page_offset = offset & ~PAGE_MASK;
+		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		data_page_offset = data_ptr & ~PAGE_MASK;
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+		if ((data_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - data_page_offset;
+
+		ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+				      shmem_page_offset,
+				      user_pages[data_page_index],
+				      data_page_offset,
+				      page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		data_ptr += page_length;
+		offset += page_length;
 	}
 
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
 	mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+	for (i = 0; i < pinned_pages; i++)
+		page_cache_release(user_pages[i]);
+	kfree(user_pages);
 
-	return 0;
+	return ret;
 }
 
 /**
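Each *_slow() variant above shares one prologue: work out which user pages back the buffer, pin them with get_user_pages() before taking struct_mutex, and then copy page-by-page through kmap_atomic() so nothing can fault while the mutex is held. Reduced to its moving parts (the label name and surrounding locals are illustrative):

	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);	/* faulting is still legal here */
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_release;
	}

	mutex_lock(&dev->struct_mutex);	/* ...but not past this point */
	/* copy via kmap_atomic()/memcpy between pinned pages: no faults */
	mutex_unlock(&dev->struct_mutex);

	out_release:
		for (i = 0; i < pinned_pages; i++)
			page_cache_release(user_pages[i]);
		kfree(user_pages);

The pread variant additionally calls SetPageDirty() on each pinned page before releasing it, since data was written into the user's pages.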
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 dev->gtt_total != 0)
-		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
-	else
-		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+		 dev->gtt_total != 0) {
+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+		if (ret == -EFAULT) {
+			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+						       file_priv);
+		}
+	} else {
+		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+		if (ret == -EFAULT) {
+			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+							 file_priv);
+		}
+	}
 
 #if WATCH_PWRITE
 	if (ret)
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 }
 
 static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count = obj->size / PAGE_SIZE;
 	int i;
 
-	if (obj_priv->page_list == NULL)
-		return;
+	BUG_ON(obj_priv->pages_refcount == 0);
 
+	if (--obj_priv->pages_refcount != 0)
+		return;
 
 	for (i = 0; i < page_count; i++)
-		if (obj_priv->page_list[i] != NULL) {
+		if (obj_priv->pages[i] != NULL) {
 			if (obj_priv->dirty)
-				set_page_dirty(obj_priv->page_list[i]);
-			mark_page_accessed(obj_priv->page_list[i]);
-			page_cache_release(obj_priv->page_list[i]);
+				set_page_dirty(obj_priv->pages[i]);
+			mark_page_accessed(obj_priv->pages[i]);
+			page_cache_release(obj_priv->pages[i]);
 		}
 	obj_priv->dirty = 0;
 
-	drm_free(obj_priv->page_list,
+	drm_free(obj_priv->pages,
 		 page_count * sizeof(struct page *),
 		 DRM_MEM_DRIVER);
-	obj_priv->page_list = NULL;
+	obj_priv->pages = NULL;
 }
 
 static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
 
-	i915_gem_object_free_page_list(obj);
+	i915_gem_object_put_pages(obj);
 
 	if (obj_priv->gtt_space) {
 		atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 }
 
 static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	struct page *page;
 	int ret;
 
-	if (obj_priv->page_list)
+	if (obj_priv->pages_refcount++ != 0)
 		return 0;
 
 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 */
 	page_count = obj->size / PAGE_SIZE;
-	BUG_ON(obj_priv->page_list != NULL);
-	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
-					 DRM_MEM_DRIVER);
-	if (obj_priv->page_list == NULL) {
+	BUG_ON(obj_priv->pages != NULL);
+	obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
+				     DRM_MEM_DRIVER);
+	if (obj_priv->pages == NULL) {
 		DRM_ERROR("Failed to allocate page list\n");
+		obj_priv->pages_refcount--;
 		return -ENOMEM;
 	}
 
@@ -1440,10 +1908,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			DRM_ERROR("read_mapping_page failed: %d\n", ret);
-			i915_gem_object_free_page_list(obj);
+			i915_gem_object_put_pages(obj);
 			return ret;
 		}
-		obj_priv->page_list[i] = page;
+		obj_priv->pages[i] = page;
 	}
 	return 0;
 }
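Because the pread/pwrite paths now need an object's backing pages outside of bind/unbind, the bare page_list was replaced by a get/put pair keyed on pages_refcount: the first get populates the array, later gets just take a reference, and only the final put releases the pages. The counting logic in isolation (a sketch with a stand-in struct, not the driver's types):

	struct backing {
		int pages_refcount;
		struct page **pages;
		int page_count;
	};

	static int backing_get_pages(struct backing *b)
	{
		if (b->pages_refcount++ != 0)	/* already populated: just count */
			return 0;

		b->pages = drm_calloc(b->page_count, sizeof(struct page *),
				      DRM_MEM_DRIVER);
		if (b->pages == NULL) {
			b->pages_refcount--;	/* undo the count on failure */
			return -ENOMEM;
		}
		/* ... pin each shmem page into b->pages[i] ... */
		return 0;
	}

	static void backing_put_pages(struct backing *b)
	{
		BUG_ON(b->pages_refcount == 0);
		if (--b->pages_refcount != 0)	/* pages still in use elsewhere */
			return;

		/* ... dirty/release each page, then free the array ... */
		drm_free(b->pages, b->page_count * sizeof(struct page *),
			 DRM_MEM_DRIVER);
		b->pages = NULL;
	}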
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %d at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	 * into the GTT.
 	 */
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
-					       obj_priv->page_list,
+					       obj_priv->pages,
 					       page_count,
 					       obj_priv->gtt_offset,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
-		i915_gem_object_free_page_list(obj);
+		i915_gem_object_put_pages(obj);
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
 		return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj_priv->page_list == NULL)
+	if (obj_priv->pages == NULL)
 		return;
 
-	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
@@ -2155,7 +2623,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
 			if (obj_priv->page_cpu_valid[i])
 				continue;
-			drm_clflush_pages(obj_priv->page_list + i, 1);
+			drm_clflush_pages(obj_priv->pages + i, 1);
 		}
 	}
 
@@ -2220,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
-		drm_clflush_pages(obj_priv->page_list + i, 1);
+		drm_clflush_pages(obj_priv->pages + i, 1);
 
 		obj_priv->page_cpu_valid[i] = 1;
 	}
@@ -2241,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry)
+				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_relocation_entry *relocs)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry reloc;
-	struct drm_i915_gem_relocation_entry __user *relocs;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
 	void __iomem *reloc_page;
@@ -2258,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 
 	entry->offset = obj_priv->gtt_offset;
 
-	relocs = (struct drm_i915_gem_relocation_entry __user *)
-		 (uintptr_t) entry->relocs_ptr;
 	/* Apply the relocations, using the GTT aperture to avoid cache
 	 * flushing requirements.
 	 */
 	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
 		struct drm_gem_object *target_obj;
 		struct drm_i915_gem_object *target_obj_priv;
 		uint32_t reloc_val, reloc_offset;
 		uint32_t __iomem *reloc_entry;
 
-		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
-
 		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-						   reloc.target_handle);
+						   reloc->target_handle);
 		if (target_obj == NULL) {
 			i915_gem_object_unpin(obj);
 			return -EBADF;
@@ -2288,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2288 | */ | 2748 | */ |
2289 | if (target_obj_priv->gtt_space == NULL) { | 2749 | if (target_obj_priv->gtt_space == NULL) { |
2290 | DRM_ERROR("No GTT space found for object %d\n", | 2750 | DRM_ERROR("No GTT space found for object %d\n", |
2291 | reloc.target_handle); | 2751 | reloc->target_handle); |
2292 | drm_gem_object_unreference(target_obj); | 2752 | drm_gem_object_unreference(target_obj); |
2293 | i915_gem_object_unpin(obj); | 2753 | i915_gem_object_unpin(obj); |
2294 | return -EINVAL; | 2754 | return -EINVAL; |
2295 | } | 2755 | } |
2296 | 2756 | ||
2297 | if (reloc.offset > obj->size - 4) { | 2757 | if (reloc->offset > obj->size - 4) { |
2298 | DRM_ERROR("Relocation beyond object bounds: " | 2758 | DRM_ERROR("Relocation beyond object bounds: " |
2299 | "obj %p target %d offset %d size %d.\n", | 2759 | "obj %p target %d offset %d size %d.\n", |
2300 | obj, reloc.target_handle, | 2760 | obj, reloc->target_handle, |
2301 | (int) reloc.offset, (int) obj->size); | 2761 | (int) reloc->offset, (int) obj->size); |
2302 | drm_gem_object_unreference(target_obj); | 2762 | drm_gem_object_unreference(target_obj); |
2303 | i915_gem_object_unpin(obj); | 2763 | i915_gem_object_unpin(obj); |
2304 | return -EINVAL; | 2764 | return -EINVAL; |
2305 | } | 2765 | } |
2306 | if (reloc.offset & 3) { | 2766 | if (reloc->offset & 3) { |
2307 | DRM_ERROR("Relocation not 4-byte aligned: " | 2767 | DRM_ERROR("Relocation not 4-byte aligned: " |
2308 | "obj %p target %d offset %d.\n", | 2768 | "obj %p target %d offset %d.\n", |
2309 | obj, reloc.target_handle, | 2769 | obj, reloc->target_handle, |
2310 | (int) reloc.offset); | 2770 | (int) reloc->offset); |
2311 | drm_gem_object_unreference(target_obj); | 2771 | drm_gem_object_unreference(target_obj); |
2312 | i915_gem_object_unpin(obj); | 2772 | i915_gem_object_unpin(obj); |
2313 | return -EINVAL; | 2773 | return -EINVAL; |
2314 | } | 2774 | } |
2315 | 2775 | ||
2316 | if (reloc.write_domain & I915_GEM_DOMAIN_CPU || | 2776 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
2317 | reloc.read_domains & I915_GEM_DOMAIN_CPU) { | 2777 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { |
2318 | DRM_ERROR("reloc with read/write CPU domains: " | 2778 | DRM_ERROR("reloc with read/write CPU domains: " |
2319 | "obj %p target %d offset %d " | 2779 | "obj %p target %d offset %d " |
2320 | "read %08x write %08x", | 2780 | "read %08x write %08x", |
2321 | obj, reloc.target_handle, | 2781 | obj, reloc->target_handle, |
2322 | (int) reloc.offset, | 2782 | (int) reloc->offset, |
2323 | reloc.read_domains, | 2783 | reloc->read_domains, |
2324 | reloc.write_domain); | 2784 | reloc->write_domain); |
2325 | drm_gem_object_unreference(target_obj); | 2785 | drm_gem_object_unreference(target_obj); |
2326 | i915_gem_object_unpin(obj); | 2786 | i915_gem_object_unpin(obj); |
2327 | return -EINVAL; | 2787 | return -EINVAL; |
2328 | } | 2788 | } |
2329 | 2789 | ||
2330 | if (reloc.write_domain && target_obj->pending_write_domain && | 2790 | if (reloc->write_domain && target_obj->pending_write_domain && |
2331 | reloc.write_domain != target_obj->pending_write_domain) { | 2791 | reloc->write_domain != target_obj->pending_write_domain) { |
2332 | DRM_ERROR("Write domain conflict: " | 2792 | DRM_ERROR("Write domain conflict: " |
2333 | "obj %p target %d offset %d " | 2793 | "obj %p target %d offset %d " |
2334 | "new %08x old %08x\n", | 2794 | "new %08x old %08x\n", |
2335 | obj, reloc.target_handle, | 2795 | obj, reloc->target_handle, |
2336 | (int) reloc.offset, | 2796 | (int) reloc->offset, |
2337 | reloc.write_domain, | 2797 | reloc->write_domain, |
2338 | target_obj->pending_write_domain); | 2798 | target_obj->pending_write_domain); |
2339 | drm_gem_object_unreference(target_obj); | 2799 | drm_gem_object_unreference(target_obj); |
2340 | i915_gem_object_unpin(obj); | 2800 | i915_gem_object_unpin(obj); |
@@ -2347,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2347 | "presumed %08x delta %08x\n", | 2807 | "presumed %08x delta %08x\n", |
2348 | __func__, | 2808 | __func__, |
2349 | obj, | 2809 | obj, |
2350 | (int) reloc.offset, | 2810 | (int) reloc->offset, |
2351 | (int) reloc.target_handle, | 2811 | (int) reloc->target_handle, |
2352 | (int) reloc.read_domains, | 2812 | (int) reloc->read_domains, |
2353 | (int) reloc.write_domain, | 2813 | (int) reloc->write_domain, |
2354 | (int) target_obj_priv->gtt_offset, | 2814 | (int) target_obj_priv->gtt_offset, |
2355 | (int) reloc.presumed_offset, | 2815 | (int) reloc->presumed_offset, |
2356 | reloc.delta); | 2816 | reloc->delta); |
2357 | #endif | 2817 | #endif |
2358 | 2818 | ||
2359 | target_obj->pending_read_domains |= reloc.read_domains; | 2819 | target_obj->pending_read_domains |= reloc->read_domains; |
2360 | target_obj->pending_write_domain |= reloc.write_domain; | 2820 | target_obj->pending_write_domain |= reloc->write_domain; |
2361 | 2821 | ||
2362 | /* If the relocation already has the right value in it, no | 2822 | /* If the relocation already has the right value in it, no |
2363 | * more work needs to be done. | 2823 | * more work needs to be done. |
2364 | */ | 2824 | */ |
2365 | if (target_obj_priv->gtt_offset == reloc.presumed_offset) { | 2825 | if (target_obj_priv->gtt_offset == reloc->presumed_offset) { |
2366 | drm_gem_object_unreference(target_obj); | 2826 | drm_gem_object_unreference(target_obj); |
2367 | continue; | 2827 | continue; |
2368 | } | 2828 | } |
@@ -2377,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2377 | /* Map the page containing the relocation we're going to | 2837 | /* Map the page containing the relocation we're going to |
2378 | * perform. | 2838 | * perform. |
2379 | */ | 2839 | */ |
2380 | reloc_offset = obj_priv->gtt_offset + reloc.offset; | 2840 | reloc_offset = obj_priv->gtt_offset + reloc->offset; |
2381 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 2841 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, |
2382 | (reloc_offset & | 2842 | (reloc_offset & |
2383 | ~(PAGE_SIZE - 1))); | 2843 | ~(PAGE_SIZE - 1))); |
2384 | reloc_entry = (uint32_t __iomem *)(reloc_page + | 2844 | reloc_entry = (uint32_t __iomem *)(reloc_page + |
2385 | (reloc_offset & (PAGE_SIZE - 1))); | 2845 | (reloc_offset & (PAGE_SIZE - 1))); |
2386 | reloc_val = target_obj_priv->gtt_offset + reloc.delta; | 2846 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; |
2387 | 2847 | ||
2388 | #if WATCH_BUF | 2848 | #if WATCH_BUF |
2389 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", | 2849 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", |
2390 | obj, (unsigned int) reloc.offset, | 2850 | obj, (unsigned int) reloc->offset, |
2391 | readl(reloc_entry), reloc_val); | 2851 | readl(reloc_entry), reloc_val); |
2392 | #endif | 2852 | #endif |
2393 | writel(reloc_val, reloc_entry); | 2853 | writel(reloc_val, reloc_entry); |
2394 | io_mapping_unmap_atomic(reloc_page); | 2854 | io_mapping_unmap_atomic(reloc_page); |
2395 | 2855 | ||
2396 | /* Write the updated presumed offset for this entry back out | 2856 | /* The updated presumed offset for this entry will be |
2397 | * to the user. | 2857 | * copied back out to the user. |
2398 | */ | 2858 | */ |
2399 | reloc.presumed_offset = target_obj_priv->gtt_offset; | 2859 | reloc->presumed_offset = target_obj_priv->gtt_offset; |
2400 | ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); | ||
2401 | if (ret != 0) { | ||
2402 | drm_gem_object_unreference(target_obj); | ||
2403 | i915_gem_object_unpin(obj); | ||
2404 | return ret; | ||
2405 | } | ||
2406 | 2860 | ||
2407 | drm_gem_object_unreference(target_obj); | 2861 | drm_gem_object_unreference(target_obj); |
2408 | } | 2862 | } |
@@ -2419,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2419 | static int | 2873 | static int |
2420 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | 2874 | i915_dispatch_gem_execbuffer(struct drm_device *dev, |
2421 | struct drm_i915_gem_execbuffer *exec, | 2875 | struct drm_i915_gem_execbuffer *exec, |
2876 | struct drm_clip_rect *cliprects, | ||
2422 | uint64_t exec_offset) | 2877 | uint64_t exec_offset) |
2423 | { | 2878 | { |
2424 | drm_i915_private_t *dev_priv = dev->dev_private; | 2879 | drm_i915_private_t *dev_priv = dev->dev_private; |
2425 | struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) | ||
2426 | (uintptr_t) exec->cliprects_ptr; | ||
2427 | int nbox = exec->num_cliprects; | 2880 | int nbox = exec->num_cliprects; |
2428 | int i = 0, count; | 2881 | int i = 0, count; |
2429 | uint32_t exec_start, exec_len; | 2882 | uint32_t exec_start, exec_len; |
@@ -2444,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, | |||
2444 | 2897 | ||
2445 | for (i = 0; i < count; i++) { | 2898 | for (i = 0; i < count; i++) { |
2446 | if (i < nbox) { | 2899 | if (i < nbox) { |
2447 | int ret = i915_emit_box(dev, boxes, i, | 2900 | int ret = i915_emit_box(dev, cliprects, i, |
2448 | exec->DR1, exec->DR4); | 2901 | exec->DR1, exec->DR4); |
2449 | if (ret) | 2902 | if (ret) |
2450 | return ret; | 2903 | return ret; |
@@ -2500,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
2500 | return ret; | 2953 | return ret; |
2501 | } | 2954 | } |
2502 | 2955 | ||
2956 | static int | ||
2957 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | ||
2958 | uint32_t buffer_count, | ||
2959 | struct drm_i915_gem_relocation_entry **relocs) | ||
2960 | { | ||
2961 | uint32_t reloc_count = 0, reloc_index = 0, i; | ||
2962 | int ret; | ||
2963 | |||
2964 | *relocs = NULL; | ||
2965 | for (i = 0; i < buffer_count; i++) { | ||
2966 | if (reloc_count + exec_list[i].relocation_count < reloc_count) | ||
2967 | return -EINVAL; | ||
2968 | reloc_count += exec_list[i].relocation_count; | ||
2969 | } | ||
2970 | |||
2971 | *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); | ||
2972 | if (*relocs == NULL) | ||
2973 | return -ENOMEM; | ||
2974 | |||
2975 | for (i = 0; i < buffer_count; i++) { | ||
2976 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
2977 | |||
2978 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
2979 | |||
2980 | ret = copy_from_user(&(*relocs)[reloc_index], | ||
2981 | user_relocs, | ||
2982 | exec_list[i].relocation_count * | ||
2983 | sizeof(**relocs)); | ||
2984 | if (ret != 0) { | ||
2985 | drm_free(*relocs, reloc_count * sizeof(**relocs), | ||
2986 | DRM_MEM_DRIVER); | ||
2987 | *relocs = NULL; | ||
2988 | return ret; | ||
2989 | } | ||
2990 | |||
2991 | reloc_index += exec_list[i].relocation_count; | ||
2992 | } | ||
2993 | |||
2994 | return 0; | ||
2995 | } | ||
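The unsigned-wrap test near the top of i915_gem_get_relocs_from_user() is the standard overflow guard: for uint32_t values, a + b < a holds exactly when the sum wrapped past UINT32_MAX, so a hostile relocation_count cannot make the drm_calloc() undersized relative to the copies that follow. A minimal sketch of the idiom:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the "reloc_count + relocation_count < reloc_count" guard:
 * with unsigned arithmetic, the sum is smaller than an operand
 * exactly when it wrapped. */
static int add_overflows_u32(uint32_t a, uint32_t b)
{
	return a + b < a;
}

int main(void)
{
	printf("%d\n", add_overflows_u32(10u, 20u));		/* 0: fits */
	printf("%d\n", add_overflows_u32(UINT32_MAX, 1u));	/* 1: wraps to 0 */
	return 0;
}
```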
2996 | |||
2997 | static int | ||
2998 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | ||
2999 | uint32_t buffer_count, | ||
3000 | struct drm_i915_gem_relocation_entry *relocs) | ||
3001 | { | ||
3002 | uint32_t reloc_count = 0, i; | ||
3003 | int ret = 0; | ||
3004 | |||
3005 | for (i = 0; i < buffer_count; i++) { | ||
3006 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3007 | |||
3008 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
3009 | |||
3010 | if (ret == 0) { | ||
3011 | ret = copy_to_user(user_relocs, | ||
3012 | &relocs[reloc_count], | ||
3013 | exec_list[i].relocation_count * | ||
3014 | sizeof(*relocs)); | ||
3015 | } | ||
3016 | |||
3017 | reloc_count += exec_list[i].relocation_count; | ||
3018 | } | ||
3019 | |||
3020 | drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); | ||
3021 | |||
3022 | return ret; | ||
3023 | } | ||
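Note the shape of the copy-back loop: even after one copy fails, the walk continues so that reloc_count reaches the full allocation size passed to drm_free(), and only the first error is reported. A standalone analog of that pattern, with copy_to_user() replaced by a stand-in that fails on a NULL destination:

```c
#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for copy_to_user(): fails on a NULL destination. */
static int copy_one(char *dst, const char *src, uint32_t len)
{
	if (dst == NULL)
		return -EFAULT;
	memcpy(dst, src, len);
	return 0;
}

/* First error wins, but the offset accounting still runs to the end
 * so the caller knows the full size that was allocated. */
static int copy_back(char *dsts[], const char *src,
		     const uint32_t counts[], uint32_t nbufs)
{
	uint32_t done = 0, i;
	int ret = 0;

	for (i = 0; i < nbufs; i++) {
		if (ret == 0)
			ret = copy_one(dsts[i], src + done, counts[i]);
		done += counts[i];
	}
	return ret;
}

int main(void)
{
	char a[4], *dsts[2] = { a, NULL };	/* second copy will fail */
	const uint32_t counts[2] = { 4, 4 };

	return copy_back(dsts, "abcdefgh", counts, 2) ? 1 : 0;
}
```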
3024 | |||
2503 | int | 3025 | int |
2504 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3026 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
2505 | struct drm_file *file_priv) | 3027 | struct drm_file *file_priv) |
@@ -2511,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2511 | struct drm_gem_object **object_list = NULL; | 3033 | struct drm_gem_object **object_list = NULL; |
2512 | struct drm_gem_object *batch_obj; | 3034 | struct drm_gem_object *batch_obj; |
2513 | struct drm_i915_gem_object *obj_priv; | 3035 | struct drm_i915_gem_object *obj_priv; |
2514 | int ret, i, pinned = 0; | 3036 | struct drm_clip_rect *cliprects = NULL; |
3037 | struct drm_i915_gem_relocation_entry *relocs; | ||
3038 | int ret, ret2, i, pinned = 0; | ||
2515 | uint64_t exec_offset; | 3039 | uint64_t exec_offset; |
2516 | uint32_t seqno, flush_domains; | 3040 | uint32_t seqno, flush_domains, reloc_index; |
2517 | int pin_tries; | 3041 | int pin_tries; |
2518 | 3042 | ||
2519 | #if WATCH_EXEC | 3043 | #if WATCH_EXEC |
@@ -2547,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2547 | goto pre_mutex_err; | 3071 | goto pre_mutex_err; |
2548 | } | 3072 | } |
2549 | 3073 | ||
3074 | if (args->num_cliprects != 0) { | ||
3075 | cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), | ||
3076 | DRM_MEM_DRIVER); | ||
3077 | if (cliprects == NULL) { | ||
3078 | ret = -ENOMEM; | ||
3078 | goto pre_mutex_err; | ||
3078 | } | ||
3079 | |||
3080 | ret = copy_from_user(cliprects, | ||
3081 | (struct drm_clip_rect __user *) | ||
3082 | (uintptr_t) args->cliprects_ptr, | ||
3083 | sizeof(*cliprects) * args->num_cliprects); | ||
3084 | if (ret != 0) { | ||
3085 | DRM_ERROR("copy %d cliprects failed: %d\n", | ||
3086 | args->num_cliprects, ret); | ||
3087 | goto pre_mutex_err; | ||
3088 | } | ||
3089 | } | ||
3090 | |||
3091 | ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, | ||
3092 | &relocs); | ||
3093 | if (ret != 0) | ||
3094 | goto pre_mutex_err; | ||
3095 | |||
2550 | mutex_lock(&dev->struct_mutex); | 3096 | mutex_lock(&dev->struct_mutex); |
2551 | 3097 | ||
2552 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3098 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -2589,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2589 | /* Pin and relocate */ | 3135 | /* Pin and relocate */ |
2590 | for (pin_tries = 0; ; pin_tries++) { | 3136 | for (pin_tries = 0; ; pin_tries++) { |
2591 | ret = 0; | 3137 | ret = 0; |
3138 | reloc_index = 0; | ||
3139 | |||
2592 | for (i = 0; i < args->buffer_count; i++) { | 3140 | for (i = 0; i < args->buffer_count; i++) { |
2593 | object_list[i]->pending_read_domains = 0; | 3141 | object_list[i]->pending_read_domains = 0; |
2594 | object_list[i]->pending_write_domain = 0; | 3142 | object_list[i]->pending_write_domain = 0; |
2595 | ret = i915_gem_object_pin_and_relocate(object_list[i], | 3143 | ret = i915_gem_object_pin_and_relocate(object_list[i], |
2596 | file_priv, | 3144 | file_priv, |
2597 | &exec_list[i]); | 3145 | &exec_list[i], |
3146 | &relocs[reloc_index]); | ||
2598 | if (ret) | 3147 | if (ret) |
2599 | break; | 3148 | break; |
2600 | pinned = i + 1; | 3149 | pinned = i + 1; |
3150 | reloc_index += exec_list[i].relocation_count; | ||
2601 | } | 3151 | } |
2602 | /* success */ | 3152 | /* success */ |
2603 | if (ret == 0) | 3153 | if (ret == 0) |
@@ -2683,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2683 | #endif | 3233 | #endif |
2684 | 3234 | ||
2685 | /* Exec the batchbuffer */ | 3235 | /* Exec the batchbuffer */ |
2686 | ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); | 3236 | ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); |
2687 | if (ret) { | 3237 | if (ret) { |
2688 | DRM_ERROR("dispatch failed %d\n", ret); | 3238 | DRM_ERROR("dispatch failed %d\n", ret); |
2689 | goto err; | 3239 | goto err; |
@@ -2747,11 +3297,27 @@ err: | |||
2747 | args->buffer_count, ret); | 3297 | args->buffer_count, ret); |
2748 | } | 3298 | } |
2749 | 3299 | ||
3300 | /* Copy the updated relocations out regardless of current error | ||
3301 | * state. Failure to update the relocs would mean that the next | ||
3302 | * time userland calls execbuf, it would do so with presumed offset | ||
3303 | * state that didn't match the actual object state. | ||
3304 | */ | ||
3305 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3306 | relocs); | ||
3307 | if (ret2 != 0) { | ||
3308 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3309 | |||
3310 | if (ret == 0) | ||
3311 | ret = ret2; | ||
3312 | } | ||
3313 | |||
2750 | pre_mutex_err: | 3314 | pre_mutex_err: |
2751 | drm_free(object_list, sizeof(*object_list) * args->buffer_count, | 3315 | drm_free(object_list, sizeof(*object_list) * args->buffer_count, |
2752 | DRM_MEM_DRIVER); | 3316 | DRM_MEM_DRIVER); |
2753 | drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, | 3317 | drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, |
2754 | DRM_MEM_DRIVER); | 3318 | DRM_MEM_DRIVER); |
3319 | drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, | ||
3320 | DRM_MEM_DRIVER); | ||
2755 | 3321 | ||
2756 | return ret; | 3322 | return ret; |
2757 | } | 3323 | } |
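For context, this is roughly how userland drives the ioctl above and consumes the copied-back presumed_offset. A hedged sketch only: it assumes the libdrm/kernel UAPI header <drm/i915_drm.h>, an open DRM fd, and two already-created GEM handles; error handling and the batch contents are out of scope.

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Submit a one-relocation batch; the kernel patches the slot at
 * reloc.offset inside the batch and, per the code above, copies the
 * updated presumed_offset back out even when the submit fails. */
int submit(int fd, uint32_t target_handle, uint32_t batch_handle,
	   uint32_t batch_len)
{
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object objs[2];
	struct drm_i915_gem_execbuffer execbuf;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = target_handle;
	reloc.offset = 4;			/* byte offset of the slot in the batch */
	reloc.read_domains = I915_GEM_DOMAIN_RENDER;

	memset(objs, 0, sizeof(objs));
	objs[0].handle = target_handle;		/* every referenced object is listed */
	objs[1].handle = batch_handle;		/* batch buffer goes last */
	objs[1].relocation_count = 1;
	objs[1].relocs_ptr = (uintptr_t)&reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_len = batch_len;

	/* On return, reloc.presumed_offset holds the target's GTT offset,
	 * so an unmoved buffer needs no rewrite on the next submit. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}
```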
@@ -3188,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev) | |||
3188 | 3754 | ||
3189 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | 3755 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; |
3190 | 3756 | ||
3191 | dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); | 3757 | dev_priv->hw_status_page = kmap(obj_priv->pages[0]); |
3192 | if (dev_priv->hw_status_page == NULL) { | 3758 | if (dev_priv->hw_status_page == NULL) { |
3193 | DRM_ERROR("Failed to map status page.\n"); | 3759 | DRM_ERROR("Failed to map status page.\n"); |
3194 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 3760 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
@@ -3218,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
3218 | obj = dev_priv->hws_obj; | 3784 | obj = dev_priv->hws_obj; |
3219 | obj_priv = obj->driver_private; | 3785 | obj_priv = obj->driver_private; |
3220 | 3786 | ||
3221 | kunmap(obj_priv->page_list[0]); | 3787 | kunmap(obj_priv->pages[0]); |
3222 | i915_gem_object_unpin(obj); | 3788 | i915_gem_object_unpin(obj); |
3223 | drm_gem_object_unreference(obj); | 3789 | drm_gem_object_unreference(obj); |
3224 | dev_priv->hws_obj = NULL; | 3790 | dev_priv->hws_obj = NULL; |
@@ -3521,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
3521 | if (!obj_priv->phys_obj) | 4087 | if (!obj_priv->phys_obj) |
3522 | return; | 4088 | return; |
3523 | 4089 | ||
3524 | ret = i915_gem_object_get_page_list(obj); | 4090 | ret = i915_gem_object_get_pages(obj); |
3525 | if (ret) | 4091 | if (ret) |
3526 | goto out; | 4092 | goto out; |
3527 | 4093 | ||
3528 | page_count = obj->size / PAGE_SIZE; | 4094 | page_count = obj->size / PAGE_SIZE; |
3529 | 4095 | ||
3530 | for (i = 0; i < page_count; i++) { | 4096 | for (i = 0; i < page_count; i++) { |
3531 | char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); | 4097 | char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); |
3532 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 4098 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
3533 | 4099 | ||
3534 | memcpy(dst, src, PAGE_SIZE); | 4100 | memcpy(dst, src, PAGE_SIZE); |
3535 | kunmap_atomic(dst, KM_USER0); | 4101 | kunmap_atomic(dst, KM_USER0); |
3536 | } | 4102 | } |
3537 | drm_clflush_pages(obj_priv->page_list, page_count); | 4103 | drm_clflush_pages(obj_priv->pages, page_count); |
3538 | drm_agp_chipset_flush(dev); | 4104 | drm_agp_chipset_flush(dev); |
3539 | out: | 4105 | out: |
3540 | obj_priv->phys_obj->cur_obj = NULL; | 4106 | obj_priv->phys_obj->cur_obj = NULL; |
@@ -3577,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
3577 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 4143 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
3578 | obj_priv->phys_obj->cur_obj = obj; | 4144 | obj_priv->phys_obj->cur_obj = obj; |
3579 | 4145 | ||
3580 | ret = i915_gem_object_get_page_list(obj); | 4146 | ret = i915_gem_object_get_pages(obj); |
3581 | if (ret) { | 4147 | if (ret) { |
3582 | DRM_ERROR("failed to get page list\n"); | 4148 | DRM_ERROR("failed to get page list\n"); |
3583 | goto out; | 4149 | goto out; |
@@ -3586,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
3586 | page_count = obj->size / PAGE_SIZE; | 4152 | page_count = obj->size / PAGE_SIZE; |
3587 | 4153 | ||
3588 | for (i = 0; i < page_count; i++) { | 4154 | for (i = 0; i < page_count; i++) { |
3589 | char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); | 4155 | char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); |
3590 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 4156 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
3591 | 4157 | ||
3592 | memcpy(dst, src, PAGE_SIZE); | 4158 | memcpy(dst, src, PAGE_SIZE); |
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 5a4cdb5d2871..455ec970b385 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c | |||
@@ -192,7 +192,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | |||
192 | 192 | ||
193 | obj_priv = obj->driver_private; | 193 | obj_priv = obj->driver_private; |
194 | seq_printf(m, "Fenced object[%2d] = %p: %s " | 194 | seq_printf(m, "Fenced object[%2d] = %p: %s " |
195 | "%08x %08x %08x %s %08x %08x %d", | 195 | "%08x %08zx %08x %s %08x %08x %d", |
196 | i, obj, get_pin_flag(obj_priv), | 196 | i, obj, get_pin_flag(obj_priv), |
197 | obj_priv->gtt_offset, | 197 | obj_priv->gtt_offset, |
198 | obj->size, obj_priv->stride, | 198 | obj->size, obj_priv->stride, |
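The format change in this hunk fixes a width mismatch: obj->size is a size_t, which is 64-bit on x86-64, so %08x would consume only half the argument. The z length modifier sizes the conversion to size_t on any ABI. A minimal illustration:

```c
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t size = (size_t)1 << 20;	/* 0x100000 */

	/* %zx matches size_t everywhere, unlike the removed %08x,
	 * which assumes a 32-bit argument. */
	printf("%08zx\n", size);	/* prints 00100000 */
	return 0;
}
```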
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7fb4191ef934..4cce1aef438e 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
96 | */ | 96 | */ |
97 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | 97 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
98 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 98 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
99 | } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || | 99 | } else if (IS_MOBILE(dev)) { |
100 | IS_GM45(dev)) { | ||
101 | uint32_t dcc; | 100 | uint32_t dcc; |
102 | 101 | ||
103 | /* On 915-945 and GM965, channel interleave by the CPU is | 102 | /* On mobile 9xx chipsets, channel interleave by the CPU is |
104 | * determined by DCC. The CPU will alternate based on bit 6 | 103 | * determined by DCC. For single-channel, neither the CPU |
105 | * in interleaved mode, and the GPU will then also alternate | 104 | * nor the GPU do swizzling. For dual channel interleaved, |
106 | * on bit 6, 9, and 10 for X, but the CPU may also optionally | 105 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit |
107 | * alternate based on bit 17 (XOR not disabled and XOR | 106 | * 9 for Y tiled. The CPU's interleave is independent, and |
108 | * bit == 17). | 107 | * can be based on either bit 11 (haven't seen this yet) or |
108 | * bit 17 (common). | ||
109 | */ | 109 | */ |
110 | dcc = I915_READ(DCC); | 110 | dcc = I915_READ(DCC); |
111 | switch (dcc & DCC_ADDRESSING_MODE_MASK) { | 111 | switch (dcc & DCC_ADDRESSING_MODE_MASK) { |
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
115 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 115 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
116 | break; | 116 | break; |
117 | case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: | 117 | case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: |
118 | if (IS_I915G(dev) || IS_I915GM(dev) || | 118 | if (dcc & DCC_CHANNEL_XOR_DISABLE) { |
119 | dcc & DCC_CHANNEL_XOR_DISABLE) { | 119 | /* This is the base swizzling by the GPU for |
120 | * tiled buffers. | ||
121 | */ | ||
120 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 122 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
121 | swizzle_y = I915_BIT_6_SWIZZLE_9; | 123 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
122 | } else if ((IS_I965GM(dev) || IS_GM45(dev)) && | 124 | } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
123 | (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { | 125 | /* Bit 11 swizzling by the CPU in addition. */ |
124 | /* GM965/GM45 does either bit 11 or bit 17 | ||
125 | * swizzling. | ||
126 | */ | ||
127 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; | 126 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; |
128 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; | 127 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
129 | } else { | 128 | } else { |
130 | /* Bit 17 or perhaps other swizzling */ | 129 | /* Bit 17 swizzling by the CPU in addition. */ |
131 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 130 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
132 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 131 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
133 | } | 132 | } |
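For reference, the swizzle modes selected above describe how bit 6 of an address must be recomputed before touching tiled memory through a linear mapping. For I915_BIT_6_SWIZZLE_9_10, the effective bit 6 is the XOR of address bits 6, 9 and 10. An illustrative helper under that assumption (not the driver's code; userspace normally applies this when detiling):

```c
#include <stdint.h>
#include <stdio.h>

/* I915_BIT_6_SWIZZLE_9_10: flip bit 6 by the parity of bits 9 and 10.
 * Bits 9 and 10 are shifted down to position 6 and XORed in. */
static uint32_t swizzle_9_10(uint32_t addr)
{
	return addr ^ (((addr >> 3) ^ (addr >> 4)) & (1u << 6));
}

int main(void)
{
	/* 0x200 has bit 9 set, so bit 6 of the result flips: 0x240. */
	printf("0x%03x -> 0x%03x\n", 0x200u, swizzle_9_10(0x200u));
	return 0;
}
```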
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 90600d899413..377cc588f5e9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -359,6 +359,7 @@ | |||
359 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ | 359 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ |
360 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ | 360 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ |
361 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ | 361 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ |
362 | #define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ | ||
362 | 363 | ||
363 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) | 364 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) |
364 | #define I915_CRC_ERROR_ENABLE (1UL<<29) | 365 | #define I915_CRC_ERROR_ENABLE (1UL<<29) |
@@ -435,6 +436,7 @@ | |||
435 | */ | 436 | */ |
436 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 | 437 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 |
437 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 | 438 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 |
439 | #define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 | ||
438 | /* i830, required in DVO non-gang */ | 440 | /* i830, required in DVO non-gang */ |
439 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) | 441 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) |
440 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ | 442 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ |
@@ -501,10 +503,12 @@ | |||
501 | #define FPB0 0x06048 | 503 | #define FPB0 0x06048 |
502 | #define FPB1 0x0604c | 504 | #define FPB1 0x0604c |
503 | #define FP_N_DIV_MASK 0x003f0000 | 505 | #define FP_N_DIV_MASK 0x003f0000 |
506 | #define FP_N_IGD_DIV_MASK 0x00ff0000 | ||
504 | #define FP_N_DIV_SHIFT 16 | 507 | #define FP_N_DIV_SHIFT 16 |
505 | #define FP_M1_DIV_MASK 0x00003f00 | 508 | #define FP_M1_DIV_MASK 0x00003f00 |
506 | #define FP_M1_DIV_SHIFT 8 | 509 | #define FP_M1_DIV_SHIFT 8 |
507 | #define FP_M2_DIV_MASK 0x0000003f | 510 | #define FP_M2_DIV_MASK 0x0000003f |
511 | #define FP_M2_IGD_DIV_MASK 0x000000ff | ||
508 | #define FP_M2_DIV_SHIFT 0 | 512 | #define FP_M2_DIV_SHIFT 0 |
509 | #define DPLL_TEST 0x606c | 513 | #define DPLL_TEST 0x606c |
510 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) | 514 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) |
@@ -629,6 +633,22 @@ | |||
629 | #define TV_HOTPLUG_INT_EN (1 << 18) | 633 | #define TV_HOTPLUG_INT_EN (1 << 18) |
630 | #define CRT_HOTPLUG_INT_EN (1 << 9) | 634 | #define CRT_HOTPLUG_INT_EN (1 << 9) |
631 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) | 635 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) |
636 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) | ||
637 | /* must use period 64 on GM45 according to docs */ | ||
638 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) | ||
639 | #define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) | ||
640 | #define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) | ||
641 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) | ||
642 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) | ||
643 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) | ||
644 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) | ||
645 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) | ||
646 | #define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) | ||
647 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) | ||
648 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | ||
649 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | ||
650 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | ||
651 | |||
632 | 652 | ||
633 | #define PORT_HOTPLUG_STAT 0x61114 | 653 | #define PORT_HOTPLUG_STAT 0x61114 |
634 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 654 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
@@ -856,7 +876,7 @@ | |||
856 | */ | 876 | */ |
857 | # define TV_ENC_C0_FIX (1 << 10) | 877 | # define TV_ENC_C0_FIX (1 << 10) |
858 | /** Bits that must be preserved by software */ | 878 | /** Bits that must be preserved by software */ |
859 | # define TV_CTL_SAVE ((3 << 8) | (3 << 6)) | 879 | # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) |
860 | # define TV_FUSE_STATE_MASK (3 << 4) | 880 | # define TV_FUSE_STATE_MASK (3 << 4) |
861 | /** Read-only state that reports all features enabled */ | 881 | /** Read-only state that reports all features enabled */ |
862 | # define TV_FUSE_STATE_ENABLED (0 << 4) | 882 | # define TV_FUSE_STATE_ENABLED (0 << 4) |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 5ea715ace3a0..de621aad85b5 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -162,13 +162,13 @@ struct bdb_lvds_options { | |||
162 | u8 panel_type; | 162 | u8 panel_type; |
163 | u8 rsvd1; | 163 | u8 rsvd1; |
164 | /* LVDS capabilities, stored in a dword */ | 164 | /* LVDS capabilities, stored in a dword */ |
165 | u8 rsvd2:1; | ||
166 | u8 lvds_edid:1; | ||
167 | u8 pixel_dither:1; | ||
168 | u8 pfit_ratio_auto:1; | ||
169 | u8 pfit_gfx_mode_enhanced:1; | ||
170 | u8 pfit_text_mode_enhanced:1; | ||
171 | u8 pfit_mode:2; | 165 | u8 pfit_mode:2; |
166 | u8 pfit_text_mode_enhanced:1; | ||
167 | u8 pfit_gfx_mode_enhanced:1; | ||
168 | u8 pfit_ratio_auto:1; | ||
169 | u8 pixel_dither:1; | ||
170 | u8 lvds_edid:1; | ||
171 | u8 rsvd2:1; | ||
172 | u8 rsvd4; | 172 | u8 rsvd4; |
173 | } __attribute__((packed)); | 173 | } __attribute__((packed)); |
174 | 174 | ||
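The intel_bios.h hunk reverses the bitfield declaration order because GCC on the little-endian targets this driver runs on allocates bitfields starting at the least significant bit: for pfit_mode to land in bits 1:0 of the VBT capabilities byte, it must be declared first. A standalone check under that (implementation-defined) layout assumption:

```c
#include <stdio.h>
#include <string.h>

/* Same field order as the corrected struct above; on little-endian
 * GCC/Clang, pfit_mode occupies bits 1:0 of the byte. */
struct lvds_caps {
	unsigned char pfit_mode:2;
	unsigned char pfit_text_mode_enhanced:1;
	unsigned char pfit_gfx_mode_enhanced:1;
	unsigned char pfit_ratio_auto:1;
	unsigned char pixel_dither:1;
	unsigned char lvds_edid:1;
	unsigned char rsvd2:1;
};

int main(void)
{
	unsigned char raw = 0x03;	/* VBT byte with bits 1:0 set */
	struct lvds_caps caps;

	memcpy(&caps, &raw, sizeof(raw));
	printf("pfit_mode = %u\n", caps.pfit_mode);	/* prints 3 */
	return 0;
}
```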
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index dcaed3466e83..2b6d44381c31 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | |||
64 | static int intel_crt_mode_valid(struct drm_connector *connector, | 64 | static int intel_crt_mode_valid(struct drm_connector *connector, |
65 | struct drm_display_mode *mode) | 65 | struct drm_display_mode *mode) |
66 | { | 66 | { |
67 | struct drm_device *dev = connector->dev; | ||
68 | |||
69 | int max_clock = 0; | ||
67 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 70 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
68 | return MODE_NO_DBLESCAN; | 71 | return MODE_NO_DBLESCAN; |
69 | 72 | ||
70 | if (mode->clock > 400000 || mode->clock < 25000) | 73 | if (mode->clock < 25000) |
71 | return MODE_CLOCK_RANGE; | 74 | return MODE_CLOCK_LOW; |
75 | |||
76 | if (!IS_I9XX(dev)) | ||
77 | max_clock = 350000; | ||
78 | else | ||
79 | max_clock = 400000; | ||
80 | if (mode->clock > max_clock) | ||
81 | return MODE_CLOCK_HIGH; | ||
72 | 82 | ||
73 | return MODE_OK; | 83 | return MODE_OK; |
74 | } | 84 | } |
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
113 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 123 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
114 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | 124 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
115 | 125 | ||
116 | if (intel_crtc->pipe == 0) | 126 | if (intel_crtc->pipe == 0) { |
117 | adpa |= ADPA_PIPE_A_SELECT; | 127 | adpa |= ADPA_PIPE_A_SELECT; |
118 | else | 128 | I915_WRITE(BCLRPAT_A, 0); |
129 | } else { | ||
119 | adpa |= ADPA_PIPE_B_SELECT; | 130 | adpa |= ADPA_PIPE_B_SELECT; |
131 | I915_WRITE(BCLRPAT_B, 0); | ||
132 | } | ||
120 | 133 | ||
121 | I915_WRITE(ADPA, adpa); | 134 | I915_WRITE(ADPA, adpa); |
122 | } | 135 | } |
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
133 | { | 146 | { |
134 | struct drm_device *dev = connector->dev; | 147 | struct drm_device *dev = connector->dev; |
135 | struct drm_i915_private *dev_priv = dev->dev_private; | 148 | struct drm_i915_private *dev_priv = dev->dev_private; |
136 | u32 temp; | 149 | u32 hotplug_en; |
137 | 150 | int i, tries = 0; | |
138 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | 151 | /* |
139 | 152 | * On 4 series desktop, CRT detect sequence need to be done twice | |
140 | temp = I915_READ(PORT_HOTPLUG_EN); | 153 | * to get a reliable result. |
141 | 154 | */ | |
142 | I915_WRITE(PORT_HOTPLUG_EN, | ||
143 | temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); | ||
144 | 155 | ||
145 | do { | 156 | if (IS_G4X(dev) && !IS_GM45(dev)) |
146 | if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) | 157 | tries = 2; |
147 | break; | 158 | else |
148 | msleep(1); | 159 | tries = 1; |
149 | } while (time_after(timeout, jiffies)); | 160 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
161 | hotplug_en &= ~(CRT_HOTPLUG_MASK); | ||
162 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; | ||
163 | |||
164 | if (IS_GM45(dev)) | ||
165 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
166 | |||
167 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
168 | |||
169 | for (i = 0; i < tries ; i++) { | ||
170 | unsigned long timeout; | ||
171 | /* turn on the FORCE_DETECT */ | ||
172 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
173 | timeout = jiffies + msecs_to_jiffies(1000); | ||
174 | /* wait for FORCE_DETECT to go off */ | ||
175 | do { | ||
176 | if (!(I915_READ(PORT_HOTPLUG_EN) & | ||
177 | CRT_HOTPLUG_FORCE_DETECT)) | ||
178 | break; | ||
179 | msleep(1); | ||
180 | } while (time_after(timeout, jiffies)); | ||
181 | } | ||
150 | 182 | ||
151 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == | 183 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == |
152 | CRT_HOTPLUG_MONITOR_COLOR) | 184 | CRT_HOTPLUG_MONITOR_COLOR) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a2834276cb38..d9c50ff94d76 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -56,11 +56,13 @@ typedef struct { | |||
56 | } intel_p2_t; | 56 | } intel_p2_t; |
57 | 57 | ||
58 | #define INTEL_P2_NUM 2 | 58 | #define INTEL_P2_NUM 2 |
59 | 59 | typedef struct intel_limit intel_limit_t; | |
60 | typedef struct { | 60 | struct intel_limit { |
61 | intel_range_t dot, vco, n, m, m1, m2, p, p1; | 61 | intel_range_t dot, vco, n, m, m1, m2, p, p1; |
62 | intel_p2_t p2; | 62 | intel_p2_t p2; |
63 | } intel_limit_t; | 63 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, |
64 | int, int, intel_clock_t *); | ||
65 | }; | ||
64 | 66 | ||
65 | #define I8XX_DOT_MIN 25000 | 67 | #define I8XX_DOT_MIN 25000 |
66 | #define I8XX_DOT_MAX 350000 | 68 | #define I8XX_DOT_MAX 350000 |
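Giving each limits table its own find_pll hook turns the PLL search into a per-platform strategy: intel_limit() picks the table once and every caller dispatches through it, instead of branching on the chipset at each call site. A standalone analog of the shape (names illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

struct limits;
typedef bool (*find_pll_fn)(const struct limits *, int target, int *best);

/* Each table carries its own search routine, like .find_pll above. */
struct limits {
	int dot_min, dot_max;
	find_pll_fn find_pll;
};

static bool find_in_range(const struct limits *l, int target, int *best)
{
	if (target < l->dot_min || target > l->dot_max)
		return false;
	*best = target;
	return true;
}

int main(void)
{
	const struct limits i9xx = { 20000, 400000, find_in_range };
	int dot;

	if (i9xx.find_pll(&i9xx, 100000, &dot))
		printf("dot clock %d kHz\n", dot);
	return 0;
}
```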
@@ -90,18 +92,32 @@ typedef struct { | |||
90 | #define I9XX_DOT_MAX 400000 | 92 | #define I9XX_DOT_MAX 400000 |
91 | #define I9XX_VCO_MIN 1400000 | 93 | #define I9XX_VCO_MIN 1400000 |
92 | #define I9XX_VCO_MAX 2800000 | 94 | #define I9XX_VCO_MAX 2800000 |
95 | #define IGD_VCO_MIN 1700000 | ||
96 | #define IGD_VCO_MAX 3500000 | ||
93 | #define I9XX_N_MIN 1 | 97 | #define I9XX_N_MIN 1 |
94 | #define I9XX_N_MAX 6 | 98 | #define I9XX_N_MAX 6 |
99 | /* IGD's Ncounter is a ring counter */ | ||
100 | #define IGD_N_MIN 3 | ||
101 | #define IGD_N_MAX 6 | ||
95 | #define I9XX_M_MIN 70 | 102 | #define I9XX_M_MIN 70 |
96 | #define I9XX_M_MAX 120 | 103 | #define I9XX_M_MAX 120 |
104 | #define IGD_M_MIN 2 | ||
105 | #define IGD_M_MAX 256 | ||
97 | #define I9XX_M1_MIN 10 | 106 | #define I9XX_M1_MIN 10 |
98 | #define I9XX_M1_MAX 22 | 107 | #define I9XX_M1_MAX 22 |
99 | #define I9XX_M2_MIN 5 | 108 | #define I9XX_M2_MIN 5 |
100 | #define I9XX_M2_MAX 9 | 109 | #define I9XX_M2_MAX 9 |
110 | /* IGD M1 is reserved, and must be 0 */ | ||
111 | #define IGD_M1_MIN 0 | ||
112 | #define IGD_M1_MAX 0 | ||
113 | #define IGD_M2_MIN 0 | ||
114 | #define IGD_M2_MAX 254 | ||
101 | #define I9XX_P_SDVO_DAC_MIN 5 | 115 | #define I9XX_P_SDVO_DAC_MIN 5 |
102 | #define I9XX_P_SDVO_DAC_MAX 80 | 116 | #define I9XX_P_SDVO_DAC_MAX 80 |
103 | #define I9XX_P_LVDS_MIN 7 | 117 | #define I9XX_P_LVDS_MIN 7 |
104 | #define I9XX_P_LVDS_MAX 98 | 118 | #define I9XX_P_LVDS_MAX 98 |
119 | #define IGD_P_LVDS_MIN 7 | ||
120 | #define IGD_P_LVDS_MAX 112 | ||
105 | #define I9XX_P1_MIN 1 | 121 | #define I9XX_P1_MIN 1 |
106 | #define I9XX_P1_MAX 8 | 122 | #define I9XX_P1_MAX 8 |
107 | #define I9XX_P2_SDVO_DAC_SLOW 10 | 123 | #define I9XX_P2_SDVO_DAC_SLOW 10 |
@@ -115,6 +131,97 @@ typedef struct { | |||
115 | #define INTEL_LIMIT_I8XX_LVDS 1 | 131 | #define INTEL_LIMIT_I8XX_LVDS 1 |
116 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 | 132 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 |
117 | #define INTEL_LIMIT_I9XX_LVDS 3 | 133 | #define INTEL_LIMIT_I9XX_LVDS 3 |
134 | #define INTEL_LIMIT_G4X_SDVO 4 | ||
135 | #define INTEL_LIMIT_G4X_HDMI_DAC 5 | ||
136 | #define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 | ||
137 | #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 | ||
138 | #define INTEL_LIMIT_IGD_SDVO_DAC 8 | ||
139 | #define INTEL_LIMIT_IGD_LVDS 9 | ||
140 | |||
141 | /* The parameters below are for SDVO on the G4x platform */ | ||
142 | #define G4X_DOT_SDVO_MIN 25000 | ||
143 | #define G4X_DOT_SDVO_MAX 270000 | ||
144 | #define G4X_VCO_MIN 1750000 | ||
145 | #define G4X_VCO_MAX 3500000 | ||
146 | #define G4X_N_SDVO_MIN 1 | ||
147 | #define G4X_N_SDVO_MAX 4 | ||
148 | #define G4X_M_SDVO_MIN 104 | ||
149 | #define G4X_M_SDVO_MAX 138 | ||
150 | #define G4X_M1_SDVO_MIN 17 | ||
151 | #define G4X_M1_SDVO_MAX 23 | ||
152 | #define G4X_M2_SDVO_MIN 5 | ||
153 | #define G4X_M2_SDVO_MAX 11 | ||
154 | #define G4X_P_SDVO_MIN 10 | ||
155 | #define G4X_P_SDVO_MAX 30 | ||
156 | #define G4X_P1_SDVO_MIN 1 | ||
157 | #define G4X_P1_SDVO_MAX 3 | ||
158 | #define G4X_P2_SDVO_SLOW 10 | ||
159 | #define G4X_P2_SDVO_FAST 10 | ||
160 | #define G4X_P2_SDVO_LIMIT 270000 | ||
161 | |||
162 | /* The parameters below are for HDMI/DAC on the G4x platform */ | ||
163 | #define G4X_DOT_HDMI_DAC_MIN 22000 | ||
164 | #define G4X_DOT_HDMI_DAC_MAX 400000 | ||
165 | #define G4X_N_HDMI_DAC_MIN 1 | ||
166 | #define G4X_N_HDMI_DAC_MAX 4 | ||
167 | #define G4X_M_HDMI_DAC_MIN 104 | ||
168 | #define G4X_M_HDMI_DAC_MAX 138 | ||
169 | #define G4X_M1_HDMI_DAC_MIN 16 | ||
170 | #define G4X_M1_HDMI_DAC_MAX 23 | ||
171 | #define G4X_M2_HDMI_DAC_MIN 5 | ||
172 | #define G4X_M2_HDMI_DAC_MAX 11 | ||
173 | #define G4X_P_HDMI_DAC_MIN 5 | ||
174 | #define G4X_P_HDMI_DAC_MAX 80 | ||
175 | #define G4X_P1_HDMI_DAC_MIN 1 | ||
176 | #define G4X_P1_HDMI_DAC_MAX 8 | ||
177 | #define G4X_P2_HDMI_DAC_SLOW 10 | ||
178 | #define G4X_P2_HDMI_DAC_FAST 5 | ||
179 | #define G4X_P2_HDMI_DAC_LIMIT 165000 | ||
180 | |||
181 | /* The parameters below are for single-channel LVDS on the G4x platform */ | ||
182 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 | ||
183 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 | ||
184 | #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 | ||
185 | #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 | ||
186 | #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 | ||
187 | #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 | ||
188 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 | ||
189 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 | ||
190 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 | ||
191 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 | ||
192 | #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 | ||
193 | #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 | ||
194 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 | ||
195 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 | ||
196 | #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 | ||
197 | #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 | ||
198 | #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 | ||
199 | |||
200 | /* The parameters below are for dual-channel LVDS on the G4x platform */ | ||
201 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 | ||
202 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 | ||
203 | #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 | ||
204 | #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 | ||
205 | #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 | ||
206 | #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 | ||
207 | #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 | ||
208 | #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 | ||
209 | #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 | ||
210 | #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 | ||
211 | #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 | ||
212 | #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 | ||
213 | #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 | ||
214 | #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 | ||
215 | #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 | ||
216 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 | ||
217 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 | ||
218 | |||
219 | static bool | ||
220 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
221 | int target, int refclk, intel_clock_t *best_clock); | ||
222 | static bool | ||
223 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
224 | int target, int refclk, intel_clock_t *best_clock); | ||
118 | 225 | ||
119 | static const intel_limit_t intel_limits[] = { | 226 | static const intel_limit_t intel_limits[] = { |
120 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ | 227 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ |
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = { | |||
128 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, | 235 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, |
129 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 236 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
130 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 237 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, |
238 | .find_pll = intel_find_best_PLL, | ||
131 | }, | 239 | }, |
132 | { /* INTEL_LIMIT_I8XX_LVDS */ | 240 | { /* INTEL_LIMIT_I8XX_LVDS */ |
133 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 241 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = { | |||
140 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, | 248 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, |
141 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 249 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
142 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 250 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, |
251 | .find_pll = intel_find_best_PLL, | ||
143 | }, | 252 | }, |
144 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ | 253 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ |
145 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 254 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = { | |||
152 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 261 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, |
153 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 262 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
154 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 263 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
264 | .find_pll = intel_find_best_PLL, | ||
155 | }, | 265 | }, |
156 | { /* INTEL_LIMIT_I9XX_LVDS */ | 266 | { /* INTEL_LIMIT_I9XX_LVDS */ |
157 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 267 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = { | |||
167 | */ | 277 | */ |
168 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 278 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
169 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | 279 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, |
280 | .find_pll = intel_find_best_PLL, | ||
281 | }, | ||
282 | /* The parameters and functions below are for the G4X chipset family */ | ||
283 | { /* INTEL_LIMIT_G4X_SDVO */ | ||
284 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, | ||
285 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | ||
286 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, | ||
287 | .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, | ||
288 | .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, | ||
289 | .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, | ||
290 | .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, | ||
291 | .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, | ||
292 | .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, | ||
293 | .p2_slow = G4X_P2_SDVO_SLOW, | ||
294 | .p2_fast = G4X_P2_SDVO_FAST | ||
295 | }, | ||
296 | .find_pll = intel_g4x_find_best_PLL, | ||
297 | }, | ||
298 | { /* INTEL_LIMIT_G4X_HDMI_DAC */ | ||
299 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, | ||
300 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | ||
301 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, | ||
302 | .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, | ||
303 | .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, | ||
304 | .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, | ||
305 | .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, | ||
306 | .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, | ||
307 | .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, | ||
308 | .p2_slow = G4X_P2_HDMI_DAC_SLOW, | ||
309 | .p2_fast = G4X_P2_HDMI_DAC_FAST | ||
310 | }, | ||
311 | .find_pll = intel_g4x_find_best_PLL, | ||
312 | }, | ||
313 | { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ | ||
314 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, | ||
315 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, | ||
316 | .vco = { .min = G4X_VCO_MIN, | ||
317 | .max = G4X_VCO_MAX }, | ||
318 | .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, | ||
319 | .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, | ||
320 | .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, | ||
321 | .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, | ||
322 | .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, | ||
323 | .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, | ||
324 | .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, | ||
325 | .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, | ||
326 | .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, | ||
327 | .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, | ||
328 | .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, | ||
329 | .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, | ||
330 | .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, | ||
331 | .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, | ||
332 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | ||
333 | }, | ||
334 | .find_pll = intel_g4x_find_best_PLL, | ||
335 | }, | ||
336 | { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ | ||
337 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, | ||
338 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, | ||
339 | .vco = { .min = G4X_VCO_MIN, | ||
340 | .max = G4X_VCO_MAX }, | ||
341 | .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, | ||
342 | .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, | ||
343 | .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, | ||
344 | .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, | ||
345 | .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, | ||
346 | .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, | ||
347 | .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, | ||
348 | .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, | ||
349 | .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, | ||
350 | .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, | ||
351 | .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, | ||
352 | .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, | ||
353 | .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, | ||
354 | .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, | ||
355 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | ||
356 | }, | ||
357 | .find_pll = intel_g4x_find_best_PLL, | ||
358 | }, | ||
359 | { /* INTEL_LIMIT_IGD_SDVO */ | ||
360 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | ||
361 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | ||
362 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | ||
363 | .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, | ||
364 | .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, | ||
365 | .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, | ||
366 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | ||
367 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | ||
368 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | ||
369 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | ||
170 | }, | 370 | }, |
371 | { /* INTEL_LIMIT_IGD_LVDS */ | ||
372 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | ||
373 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | ||
374 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | ||
375 | .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, | ||
376 | .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, | ||
377 | .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, | ||
378 | .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, | ||
379 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | ||
380 | /* IGD only supports single-channel mode. */ | ||
381 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | ||
382 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | ||
383 | }, | ||
384 | |||
171 | }; | 385 | }; |
172 | 386 | ||
387 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | ||
388 | { | ||
389 | struct drm_device *dev = crtc->dev; | ||
390 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
391 | const intel_limit_t *limit; | ||
392 | |||
393 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
394 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | ||
395 | LVDS_CLKB_POWER_UP) | ||
396 | /* LVDS with dual channel */ | ||
397 | limit = &intel_limits | ||
398 | [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; | ||
399 | else | ||
400 | /* LVDS with single channel */ | ||
401 | limit = &intel_limits | ||
402 | [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; | ||
403 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || | ||
404 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | ||
405 | limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; | ||
406 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { | ||
407 | limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; | ||
408 | } else /* Fall back to the I9XX limits for any other output */ | ||
409 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | ||
410 | |||
411 | return limit; | ||
412 | } | ||
413 | |||
173 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | 414 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) |
174 | { | 415 | { |
175 | struct drm_device *dev = crtc->dev; | 416 | struct drm_device *dev = crtc->dev; |
176 | const intel_limit_t *limit; | 417 | const intel_limit_t *limit; |
177 | 418 | ||
178 | if (IS_I9XX(dev)) { | 419 | if (IS_G4X(dev)) { |
420 | limit = intel_g4x_limit(crtc); | ||
421 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { | ||
179 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 422 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
180 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; | 423 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; |
181 | else | 424 | else |
182 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | 425 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; |
426 | } else if (IS_IGD(dev)) { | ||
427 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
428 | limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; | ||
429 | else | ||
430 | limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; | ||
183 | } else { | 431 | } else { |
184 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 432 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
185 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; | 433 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; |
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
189 | return limit; | 437 | return limit; |
190 | } | 438 | } |
191 | 439 | ||
192 | static void intel_clock(int refclk, intel_clock_t *clock) | 440 | /* m1 is reserved as 0 in IGD, n is a ring counter */ |
441 | static void igd_clock(int refclk, intel_clock_t *clock) | ||
193 | { | 442 | { |
443 | clock->m = clock->m2 + 2; | ||
444 | clock->p = clock->p1 * clock->p2; | ||
445 | clock->vco = refclk * clock->m / clock->n; | ||
446 | clock->dot = clock->vco / clock->p; | ||
447 | } | ||
448 | |||
449 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) | ||
450 | { | ||
451 | if (IS_IGD(dev)) { | ||
452 | igd_clock(refclk, clock); | ||
453 | return; | ||
454 | } | ||
194 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | 455 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
195 | clock->p = clock->p1 * clock->p2; | 456 | clock->p = clock->p1 * clock->p2; |
196 | clock->vco = refclk * clock->m / (clock->n + 2); | 457 | clock->vco = refclk * clock->m / (clock->n + 2); |
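Worked numbers for the two formulas, with the 96 MHz reference clock expressed in kHz as the driver does; the divisor values are arbitrary in-range picks for illustration, not taken from real mode setup:

```c
#include <stdio.h>

int main(void)
{
	int refclk = 96000;	/* kHz */

	/* i9xx: m = 5*(m1+2) + (m2+2), vco = refclk*m/(n+2), dot = vco/(p1*p2) */
	int m1 = 12, m2 = 7, n = 3, p1 = 2, p2 = 10;
	int m = 5 * (m1 + 2) + (m2 + 2);		/* 79 */
	int vco = refclk * m / (n + 2);			/* 1516800 kHz */
	printf("i9xx dot = %d kHz\n", vco / (p1 * p2));	/* 75840 */

	/* IGD: m1 is reserved as 0, m = m2+2, and n divides directly
	 * because it is a ring counter. */
	int m2_igd = 100, n_igd = 3;
	int vco_igd = refclk * (m2_igd + 2) / n_igd;	/* 3264000 kHz */
	printf("IGD dot = %d kHz\n", vco_igd / (p1 * p2));	/* 163200 */
	return 0;
}
```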
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
226 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | 487 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) |
227 | { | 488 | { |
228 | const intel_limit_t *limit = intel_limit (crtc); | 489 | const intel_limit_t *limit = intel_limit (crtc); |
490 | struct drm_device *dev = crtc->dev; | ||
229 | 491 | ||
230 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 492 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
231 | INTELPllInvalid ("p1 out of range\n"); | 493 | INTELPllInvalid ("p1 out of range\n"); |
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | |||
235 | INTELPllInvalid ("m2 out of range\n"); | 497 | INTELPllInvalid ("m2 out of range\n"); |
236 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | 498 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
237 | INTELPllInvalid ("m1 out of range\n"); | 499 | INTELPllInvalid ("m1 out of range\n"); |
238 | if (clock->m1 <= clock->m2) | 500 | if (clock->m1 <= clock->m2 && !IS_IGD(dev)) |
239 | INTELPllInvalid ("m1 <= m2\n"); | 501 | INTELPllInvalid ("m1 <= m2\n"); |
240 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 502 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
241 | INTELPllInvalid ("m out of range\n"); | 503 | INTELPllInvalid ("m out of range\n"); |
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | |||
252 | return true; | 514 | return true; |
253 | } | 515 | } |
254 | 516 | ||
255 | /** | 517 | static bool |
256 | * Returns a set of divisors for the desired target clock with the given | 518 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
257 | * refclk, or FALSE. The returned values represent the clock equation: | 519 | int target, int refclk, intel_clock_t *best_clock) |
258 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 520 | |
259 | */ | ||
260 | static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | ||
261 | int refclk, intel_clock_t *best_clock) | ||
262 | { | 521 | { |
263 | struct drm_device *dev = crtc->dev; | 522 | struct drm_device *dev = crtc->dev; |
264 | struct drm_i915_private *dev_priv = dev->dev_private; | 523 | struct drm_i915_private *dev_priv = dev->dev_private; |
265 | intel_clock_t clock; | 524 | intel_clock_t clock; |
266 | const intel_limit_t *limit = intel_limit(crtc); | ||
267 | int err = target; | 525 | int err = target; |
268 | 526 | ||
269 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 527 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | |||
289 | memset (best_clock, 0, sizeof (*best_clock)); | 547 | memset (best_clock, 0, sizeof (*best_clock)); |
290 | 548 | ||
291 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { | 549 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { |
292 | for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && | 550 | for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { |
293 | clock.m2 <= limit->m2.max; clock.m2++) { | 551 | /* m1 is always 0 in IGD */ |
552 | if (clock.m2 >= clock.m1 && !IS_IGD(dev)) | ||
553 | break; | ||
294 | for (clock.n = limit->n.min; clock.n <= limit->n.max; | 554 | for (clock.n = limit->n.min; clock.n <= limit->n.max; |
295 | clock.n++) { | 555 | clock.n++) { |
296 | for (clock.p1 = limit->p1.min; | 556 | for (clock.p1 = limit->p1.min; |
297 | clock.p1 <= limit->p1.max; clock.p1++) { | 557 | clock.p1 <= limit->p1.max; clock.p1++) { |
298 | int this_err; | 558 | int this_err; |
299 | 559 | ||
300 | intel_clock(refclk, &clock); | 560 | intel_clock(dev, refclk, &clock); |
301 | 561 | ||
302 | if (!intel_PLL_is_valid(crtc, &clock)) | 562 | if (!intel_PLL_is_valid(crtc, &clock)) |
303 | continue; | 563 | continue; |
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | |||
315 | return (err != target); | 575 | return (err != target); |
316 | } | 576 | } |
317 | 577 | ||
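intel_find_best_PLL() above seeds err with the target clock itself, so any valid candidate whose |dot - target| is smaller replaces it, and (err != target) at the end doubles as a "found something" flag. A minimal userspace rendition of that idiom, with hypothetical candidate clocks:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int target = 108000, err = target;	/* sentinel: nothing found yet */
	int candidates[] = { 107520, 108100, 96000 };	/* hypothetical dots */

	for (int i = 0; i < 3; i++) {
		int this_err = abs(candidates[i] - target);
		if (this_err < err)
			err = this_err;	/* best_clock would be recorded here */
	}
	/* err != target  <=>  at least one candidate beat the sentinel */
	printf("found=%d best_err=%d\n", err != target, err);
	return 0;
}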
578 | static bool | ||
579 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
580 | int target, int refclk, intel_clock_t *best_clock) | ||
581 | { | ||
582 | struct drm_device *dev = crtc->dev; | ||
583 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
584 | intel_clock_t clock; | ||
585 | int max_n; | ||
586 | bool found; | ||
587 | /* approximately equals target * 0.00488 */ | ||
588 | int err_most = (target >> 8) + (target >> 10); | ||
589 | found = false; | ||
590 | |||
591 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
592 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | ||
593 | LVDS_CLKB_POWER_UP) | ||
594 | clock.p2 = limit->p2.p2_fast; | ||
595 | else | ||
596 | clock.p2 = limit->p2.p2_slow; | ||
597 | } else { | ||
598 | if (target < limit->p2.dot_limit) | ||
599 | clock.p2 = limit->p2.p2_slow; | ||
600 | else | ||
601 | clock.p2 = limit->p2.p2_fast; | ||
602 | } | ||
603 | |||
604 | memset(best_clock, 0, sizeof(*best_clock)); | ||
605 | max_n = limit->n.max; | ||
606 | /* based on hardware requirement, prefer smaller n for precision */ | ||
607 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { | ||
608 | /* based on hardware requirement, prefer larger m1, m2, p1 */ | ||
609 | for (clock.m1 = limit->m1.max; | ||
610 | clock.m1 >= limit->m1.min; clock.m1--) { | ||
611 | for (clock.m2 = limit->m2.max; | ||
612 | clock.m2 >= limit->m2.min; clock.m2--) { | ||
613 | for (clock.p1 = limit->p1.max; | ||
614 | clock.p1 >= limit->p1.min; clock.p1--) { | ||
615 | int this_err; | ||
616 | |||
617 | intel_clock(dev, refclk, &clock); | ||
618 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
619 | continue; | ||
620 | this_err = abs(clock.dot - target); | ||
621 | if (this_err < err_most) { | ||
622 | *best_clock = clock; | ||
623 | err_most = this_err; | ||
624 | max_n = clock.n; | ||
625 | found = true; | ||
626 | } | ||
627 | } | ||
628 | } | ||
629 | } | ||
630 | } | ||
631 | |||
632 | return found; | ||
633 | } | ||
634 | |||
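The err_most seed in intel_g4x_find_best_PLL() is a shift trick: (target >> 8) + (target >> 10) equals target * (1/256 + 1/1024) = target * 5/1024 ≈ 0.488% of target, which is what the 0.00488 comment refers to. Note also that max_n is clamped to clock.n whenever a better candidate is found, so the search keeps shrinking toward smaller n, matching the "prefer smaller n" comment. A quick check of the arithmetic with a hypothetical 270 MHz target:

#include <stdio.h>

int main(void)
{
	int target = 270000;	/* kHz, hypothetical dot clock */
	int err_most = (target >> 8) + (target >> 10);

	/* (1/256 + 1/1024) = 5/1024 = 0.0048828125 */
	printf("err_most = %d (~%.3f%% of target)\n",
	       err_most, 100.0 * err_most / target);
	return 0;
}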
318 | void | 635 | void |
319 | intel_wait_for_vblank(struct drm_device *dev) | 636 | intel_wait_for_vblank(struct drm_device *dev) |
320 | { | 637 | { |
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev) | |||
634 | return 400000; | 951 | return 400000; |
635 | else if (IS_I915G(dev)) | 952 | else if (IS_I915G(dev)) |
636 | return 333000; | 953 | return 333000; |
637 | else if (IS_I945GM(dev) || IS_845G(dev)) | 954 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) |
638 | return 200000; | 955 | return 200000; |
639 | else if (IS_I915GM(dev)) { | 956 | else if (IS_I915GM(dev)) { |
640 | u16 gcfgc = 0; | 957 | u16 gcfgc = 0; |
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
733 | bool is_crt = false, is_lvds = false, is_tv = false; | 1050 | bool is_crt = false, is_lvds = false, is_tv = false; |
734 | struct drm_mode_config *mode_config = &dev->mode_config; | 1051 | struct drm_mode_config *mode_config = &dev->mode_config; |
735 | struct drm_connector *connector; | 1052 | struct drm_connector *connector; |
1053 | const intel_limit_t *limit; | ||
736 | int ret; | 1054 | int ret; |
737 | 1055 | ||
738 | drm_vblank_pre_modeset(dev, pipe); | 1056 | drm_vblank_pre_modeset(dev, pipe); |
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
776 | refclk = 48000; | 1094 | refclk = 48000; |
777 | } | 1095 | } |
778 | 1096 | ||
779 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); | 1097 | /* |
1098 | * Returns a set of divisors for the desired target clock with the given | ||
1099 | * refclk, or FALSE. The returned values represent the clock equation: | ||
1100 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | ||
1101 | */ | ||
1102 | limit = intel_limit(crtc); | ||
1103 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | ||
780 | if (!ok) { | 1104 | if (!ok) { |
781 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 1105 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
782 | return -EINVAL; | 1106 | return -EINVAL; |
783 | } | 1107 | } |
784 | 1108 | ||
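With this change the mode-set path no longer hardcodes intel_find_best_PLL(); each limit table carries a find_pll function pointer, so G4X-class limits can point at intel_g4x_find_best_PLL() while older parts keep the exhaustive search. The sketch below reconstructs the dispatch shape from the calls in this patch (the real intel_limit struct is defined earlier in intel_display.c and has more fields; the stub types here are for illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Stub types standing in for the DRM/i915 ones. */
struct drm_crtc { int dummy; };
typedef struct { int n, m1, m2, p1, p2, dot; } intel_clock_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	/* ... divisor range fields elided ... */
	bool (*find_pll)(const intel_limit_t *limit, struct drm_crtc *crtc,
			 int target, int refclk, intel_clock_t *best_clock);
};

static bool fake_find_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			  int target, int refclk, intel_clock_t *best_clock)
{
	(void)limit; (void)crtc; (void)refclk;
	best_clock->dot = target;	/* pretend we found an exact match */
	return true;
}

int main(void)
{
	struct drm_crtc crtc = { 0 };
	intel_clock_t clock;
	intel_limit_t limit = { .find_pll = fake_find_pll };

	/* Mirrors the new call site: ok = limit->find_pll(limit, crtc, ...) */
	bool ok = limit.find_pll(&limit, &crtc, 108000, 96000, &clock);
	printf("ok=%d dot=%d\n", ok, clock.dot);
	return 0;
}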
785 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 1109 | if (IS_IGD(dev)) |
1110 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | ||
1111 | else | ||
1112 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
786 | 1113 | ||
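On IGD the FP register's N field holds a power of two rather than the raw divisor, hence (1 << clock.n) << 16 here and the matching ffs() - 1 decode in intel_crtc_clock_get() below. Round-tripping the encoding with hypothetical divisors:

#include <stdio.h>
#include <strings.h>	/* userspace ffs(); the kernel provides its own */

int main(void)
{
	unsigned int n = 3, m1 = 0, m2 = 0x64;	/* hypothetical IGD divisors */

	/* IGD encode, as in intel_crtc_mode_set(): N is stored one-hot. */
	unsigned int fp = (1u << n) << 16 | m1 << 8 | m2;

	/* Decode, as in intel_crtc_clock_get(): ffs() returns the 1-based
	 * index of the lowest set bit, so subtract 1 to recover n. */
	unsigned int n_back = ffs(fp >> 16) - 1;

	printf("fp=0x%08x n=%u decoded=%u\n", fp, n, n_back);
	return 0;
}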
787 | dpll = DPLL_VGA_MODE_DIS; | 1114 | dpll = DPLL_VGA_MODE_DIS; |
788 | if (IS_I9XX(dev)) { | 1115 | if (IS_I9XX(dev)) { |
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
799 | } | 1126 | } |
800 | 1127 | ||
801 | /* compute bitmask from p1 value */ | 1128 | /* compute bitmask from p1 value */ |
802 | dpll |= (1 << (clock.p1 - 1)) << 16; | 1129 | if (IS_IGD(dev)) |
1130 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; | ||
1131 | else | ||
1132 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | ||
803 | switch (clock.p2) { | 1133 | switch (clock.p2) { |
804 | case 5: | 1134 | case 5: |
805 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | 1135 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1279 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | 1609 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); |
1280 | 1610 | ||
1281 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 1611 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
1282 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | 1612 | if (IS_IGD(dev)) { |
1283 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | 1613 | clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; |
1614 | clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; | ||
1615 | } else { | ||
1616 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | ||
1617 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | ||
1618 | } | ||
1619 | |||
1284 | if (IS_I9XX(dev)) { | 1620 | if (IS_I9XX(dev)) { |
1285 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | 1621 | if (IS_IGD(dev)) |
1622 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> | ||
1623 | DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); | ||
1624 | else | ||
1625 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | ||
1286 | DPLL_FPA01_P1_POST_DIV_SHIFT); | 1626 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
1287 | 1627 | ||
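Note the off-by-one contrast with the N field: p1 is written as (1 << (p1 - 1)) << shift in intel_crtc_mode_set() above, so ffs() alone (being 1-based) recovers p1 directly, whereas N on IGD is written as 1 << n and needs ffs() - 1. A two-line check with hypothetical values:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int p1 = 2, n = 3;	/* hypothetical divisors */
	unsigned int p1_bits = 1u << (p1 - 1), n_bits = 1u << n;

	printf("p1 back = %d, n back = %d\n", ffs(p1_bits), ffs(n_bits) - 1);
	return 0;
}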
1288 | switch (dpll & DPLL_MODE_MASK) { | 1628 | switch (dpll & DPLL_MODE_MASK) { |
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1301 | } | 1641 | } |
1302 | 1642 | ||
1303 | /* XXX: Handle the 100Mhz refclk */ | 1643 | /* XXX: Handle the 100Mhz refclk */ |
1304 | intel_clock(96000, &clock); | 1644 | intel_clock(dev, 96000, &clock); |
1305 | } else { | 1645 | } else { |
1306 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | 1646 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); |
1307 | 1647 | ||
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1313 | if ((dpll & PLL_REF_INPUT_MASK) == | 1653 | if ((dpll & PLL_REF_INPUT_MASK) == |
1314 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | 1654 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { |
1315 | /* XXX: might not be 66MHz */ | 1655 | /* XXX: might not be 66MHz */ |
1316 | intel_clock(66000, &clock); | 1656 | intel_clock(dev, 66000, &clock); |
1317 | } else | 1657 | } else |
1318 | intel_clock(48000, &clock); | 1658 | intel_clock(dev, 48000, &clock); |
1319 | } else { | 1659 | } else { |
1320 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | 1660 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
1321 | clock.p1 = 2; | 1661 | clock.p1 = 2; |
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1328 | else | 1668 | else |
1329 | clock.p2 = 2; | 1669 | clock.p2 = 2; |
1330 | 1670 | ||
1331 | intel_clock(48000, &clock); | 1671 | intel_clock(dev, 48000, &clock); |
1332 | } | 1672 | } |
1333 | } | 1673 | } |
1334 | 1674 | ||
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
1474 | 1814 | ||
1475 | if (IS_I9XX(dev)) { | 1815 | if (IS_I9XX(dev)) { |
1476 | int found; | 1816 | int found; |
1817 | u32 reg; | ||
1477 | 1818 | ||
1478 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 1819 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
1479 | found = intel_sdvo_init(dev, SDVOB); | 1820 | found = intel_sdvo_init(dev, SDVOB); |
1480 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 1821 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
1481 | intel_hdmi_init(dev, SDVOB); | 1822 | intel_hdmi_init(dev, SDVOB); |
1482 | } | 1823 | } |
1483 | if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { | 1824 | |
1825 | /* Before G4X, SDVOC doesn't have its own detect register */ | ||
1826 | if (IS_G4X(dev)) | ||
1827 | reg = SDVOC; | ||
1828 | else | ||
1829 | reg = SDVOB; | ||
1830 | |||
1831 | if (I915_READ(reg) & SDVO_DETECTED) { | ||
1484 | found = intel_sdvo_init(dev, SDVOC); | 1832 | found = intel_sdvo_init(dev, SDVOC); |
1485 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 1833 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
1486 | intel_hdmi_init(dev, SDVOC); | 1834 | intel_hdmi_init(dev, SDVOC); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0d211af98854..6619f26e46a5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
265 | pfit_control = 0; | 265 | pfit_control = 0; |
266 | 266 | ||
267 | if (!IS_I965G(dev)) { | 267 | if (!IS_I965G(dev)) { |
268 | if (dev_priv->panel_wants_dither) | 268 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) |
269 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | 269 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; |
270 | } | 270 | } |
271 | else | 271 | else |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 56485d67369b..ceca9471a75a 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -217,8 +217,8 @@ static const u32 filter_table[] = { | |||
217 | */ | 217 | */ |
218 | static const struct color_conversion ntsc_m_csc_composite = { | 218 | static const struct color_conversion ntsc_m_csc_composite = { |
219 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | 219 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, |
220 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | 220 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, |
221 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | 221 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, |
222 | }; | 222 | }; |
223 | 223 | ||
224 | static const struct video_levels ntsc_m_levels_composite = { | 224 | static const struct video_levels ntsc_m_levels_composite = { |
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = { | |||
226 | }; | 226 | }; |
227 | 227 | ||
228 | static const struct color_conversion ntsc_m_csc_svideo = { | 228 | static const struct color_conversion ntsc_m_csc_svideo = { |
229 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | 229 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, |
230 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | 230 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, |
231 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | 231 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | static const struct video_levels ntsc_m_levels_svideo = { | 234 | static const struct video_levels ntsc_m_levels_svideo = { |
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = { | |||
237 | 237 | ||
238 | static const struct color_conversion ntsc_j_csc_composite = { | 238 | static const struct color_conversion ntsc_j_csc_composite = { |
239 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, | 239 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, |
240 | .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, | 240 | .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, |
241 | .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, | 241 | .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, |
242 | }; | 242 | }; |
243 | 243 | ||
244 | static const struct video_levels ntsc_j_levels_composite = { | 244 | static const struct video_levels ntsc_j_levels_composite = { |
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = { | |||
247 | 247 | ||
248 | static const struct color_conversion ntsc_j_csc_svideo = { | 248 | static const struct color_conversion ntsc_j_csc_svideo = { |
249 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, | 249 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, |
250 | .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, | 250 | .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, |
251 | .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, | 251 | .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, |
252 | }; | 252 | }; |
253 | 253 | ||
254 | static const struct video_levels ntsc_j_levels_svideo = { | 254 | static const struct video_levels ntsc_j_levels_svideo = { |
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = { | |||
257 | 257 | ||
258 | static const struct color_conversion pal_csc_composite = { | 258 | static const struct color_conversion pal_csc_composite = { |
259 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, | 259 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, |
260 | .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, | 260 | .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, |
261 | .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, | 261 | .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, |
262 | }; | 262 | }; |
263 | 263 | ||
264 | static const struct video_levels pal_levels_composite = { | 264 | static const struct video_levels pal_levels_composite = { |
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = { | |||
267 | 267 | ||
268 | static const struct color_conversion pal_csc_svideo = { | 268 | static const struct color_conversion pal_csc_svideo = { |
269 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, | 269 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, |
270 | .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, | 270 | .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, |
271 | .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, | 271 | .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, |
272 | }; | 272 | }; |
273 | 273 | ||
274 | static const struct video_levels pal_levels_svideo = { | 274 | static const struct video_levels pal_levels_svideo = { |
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = { | |||
277 | 277 | ||
278 | static const struct color_conversion pal_m_csc_composite = { | 278 | static const struct color_conversion pal_m_csc_composite = { |
279 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | 279 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, |
280 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | 280 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, |
281 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | 281 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, |
282 | }; | 282 | }; |
283 | 283 | ||
284 | static const struct video_levels pal_m_levels_composite = { | 284 | static const struct video_levels pal_m_levels_composite = { |
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = { | |||
286 | }; | 286 | }; |
287 | 287 | ||
288 | static const struct color_conversion pal_m_csc_svideo = { | 288 | static const struct color_conversion pal_m_csc_svideo = { |
289 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | 289 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, |
290 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | 290 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, |
291 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | 291 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, |
292 | }; | 292 | }; |
293 | 293 | ||
294 | static const struct video_levels pal_m_levels_svideo = { | 294 | static const struct video_levels pal_m_levels_svideo = { |
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = { | |||
297 | 297 | ||
298 | static const struct color_conversion pal_n_csc_composite = { | 298 | static const struct color_conversion pal_n_csc_composite = { |
299 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | 299 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, |
300 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | 300 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, |
301 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | 301 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, |
302 | }; | 302 | }; |
303 | 303 | ||
304 | static const struct video_levels pal_n_levels_composite = { | 304 | static const struct video_levels pal_n_levels_composite = { |
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = { | |||
306 | }; | 306 | }; |
307 | 307 | ||
308 | static const struct color_conversion pal_n_csc_svideo = { | 308 | static const struct color_conversion pal_n_csc_svideo = { |
309 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | 309 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, |
310 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | 310 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, |
311 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | 311 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, |
312 | }; | 312 | }; |
313 | 313 | ||
314 | static const struct video_levels pal_n_levels_svideo = { | 314 | static const struct video_levels pal_n_levels_svideo = { |
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = { | |||
319 | * Component connections | 319 | * Component connections |
320 | */ | 320 | */ |
321 | static const struct color_conversion sdtv_csc_yprpb = { | 321 | static const struct color_conversion sdtv_csc_yprpb = { |
322 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, | 322 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, |
323 | .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, | 323 | .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, |
324 | .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, | 324 | .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, |
325 | }; | 325 | }; |
326 | 326 | ||
327 | static const struct color_conversion sdtv_csc_rgb = { | 327 | static const struct color_conversion sdtv_csc_rgb = { |
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = { | |||
331 | }; | 331 | }; |
332 | 332 | ||
333 | static const struct color_conversion hdtv_csc_yprpb = { | 333 | static const struct color_conversion hdtv_csc_yprpb = { |
334 | .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, | 334 | .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, |
335 | .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, | 335 | .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, |
336 | .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, | 336 | .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, |
337 | }; | 337 | }; |
338 | 338 | ||
339 | static const struct color_conversion hdtv_csc_rgb = { | 339 | static const struct color_conversion hdtv_csc_rgb = { |
@@ -414,7 +414,7 @@ struct tv_mode { | |||
414 | static const struct tv_mode tv_modes[] = { | 414 | static const struct tv_mode tv_modes[] = { |
415 | { | 415 | { |
416 | .name = "NTSC-M", | 416 | .name = "NTSC-M", |
417 | .clock = 107520, | 417 | .clock = 108000, |
418 | .refresh = 29970, | 418 | .refresh = 29970, |
419 | .oversample = TV_OVERSAMPLE_8X, | 419 | .oversample = TV_OVERSAMPLE_8X, |
420 | .component_only = 0, | 420 | .component_only = 0, |
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = { | |||
442 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | 442 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, |
443 | 443 | ||
444 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | 444 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ |
445 | .dda1_inc = 136, | 445 | .dda1_inc = 135, |
446 | .dda2_inc = 7624, .dda2_size = 20013, | 446 | .dda2_inc = 20800, .dda2_size = 27456, |
447 | .dda3_inc = 0, .dda3_size = 0, | 447 | .dda3_inc = 0, .dda3_size = 0, |
448 | .sc_reset = TV_SC_RESET_EVERY_4, | 448 | .sc_reset = TV_SC_RESET_EVERY_4, |
449 | .pal_burst = false, | 449 | .pal_burst = false, |
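The retuned NTSC-M numbers are consistent with the subcarrier DDA following fsc = (clock / oversample) * (dda1_inc + dda2_inc / dda2_size) / 512; treat that relationship as an inference from the values rather than something documented in this patch. Under it, the old pair (136, 7624/20013) at 107.52 MHz lands on exactly 3.5800000 MHz, matching the comment above (which still says "clock 107.52" and is now stale), while the new pair (135, 20800/27456) at 108 MHz lands on 3.5795455 MHz, the true NTSC subcarrier of 315/88 MHz. Checking the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumed DDA relationship, inferred from the table values only:
	 * fsc = (clock / oversample) * (dda1 + dda2_inc / dda2_size) / 512 */
	double new_fsc = (108000e3 / 8) * (135 + 20800.0 / 27456) / 512;
	double old_fsc = (107520e3 / 8) * (136 + 7624.0 / 20013) / 512;

	printf("old: %.7f MHz (targeted a rounded 3.58)\n", old_fsc / 1e6);
	printf("new: %.7f MHz (315/88 = %.7f MHz)\n",
	       new_fsc / 1e6, 315.0 / 88);
	return 0;
}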
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = { | |||
457 | }, | 457 | }, |
458 | { | 458 | { |
459 | .name = "NTSC-443", | 459 | .name = "NTSC-443", |
460 | .clock = 107520, | 460 | .clock = 108000, |
461 | .refresh = 29970, | 461 | .refresh = 29970, |
462 | .oversample = TV_OVERSAMPLE_8X, | 462 | .oversample = TV_OVERSAMPLE_8X, |
463 | .component_only = 0, | 463 | .component_only = 0, |
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = { | |||
485 | 485 | ||
486 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | 486 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ |
487 | .dda1_inc = 168, | 487 | .dda1_inc = 168, |
488 | .dda2_inc = 18557, .dda2_size = 20625, | 488 | .dda2_inc = 4093, .dda2_size = 27456, |
489 | .dda3_inc = 0, .dda3_size = 0, | 489 | .dda3_inc = 310, .dda3_size = 525, |
490 | .sc_reset = TV_SC_RESET_EVERY_8, | 490 | .sc_reset = TV_SC_RESET_NEVER, |
491 | .pal_burst = true, | 491 | .pal_burst = false, |
492 | 492 | ||
493 | .composite_levels = &ntsc_m_levels_composite, | 493 | .composite_levels = &ntsc_m_levels_composite, |
494 | .composite_color = &ntsc_m_csc_composite, | 494 | .composite_color = &ntsc_m_csc_composite, |
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = { | |||
499 | }, | 499 | }, |
500 | { | 500 | { |
501 | .name = "NTSC-J", | 501 | .name = "NTSC-J", |
502 | .clock = 107520, | 502 | .clock = 108000, |
503 | .refresh = 29970, | 503 | .refresh = 29970, |
504 | .oversample = TV_OVERSAMPLE_8X, | 504 | .oversample = TV_OVERSAMPLE_8X, |
505 | .component_only = 0, | 505 | .component_only = 0, |
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = { | |||
527 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | 527 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, |
528 | 528 | ||
529 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | 529 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ |
530 | .dda1_inc = 136, | 530 | .dda1_inc = 135, |
531 | .dda2_inc = 7624, .dda2_size = 20013, | 531 | .dda2_inc = 20800, .dda2_size = 27456, |
532 | .dda3_inc = 0, .dda3_size = 0, | 532 | .dda3_inc = 0, .dda3_size = 0, |
533 | .sc_reset = TV_SC_RESET_EVERY_4, | 533 | .sc_reset = TV_SC_RESET_EVERY_4, |
534 | .pal_burst = false, | 534 | .pal_burst = false, |
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = { | |||
542 | }, | 542 | }, |
543 | { | 543 | { |
544 | .name = "PAL-M", | 544 | .name = "PAL-M", |
545 | .clock = 107520, | 545 | .clock = 108000, |
546 | .refresh = 29970, | 546 | .refresh = 29970, |
547 | .oversample = TV_OVERSAMPLE_8X, | 547 | .oversample = TV_OVERSAMPLE_8X, |
548 | .component_only = 0, | 548 | .component_only = 0, |
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = { | |||
570 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | 570 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, |
571 | 571 | ||
572 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | 572 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ |
573 | .dda1_inc = 136, | 573 | .dda1_inc = 135, |
574 | .dda2_inc = 7624, .dda2_size = 20013, | 574 | .dda2_inc = 16704, .dda2_size = 27456, |
575 | .dda3_inc = 0, .dda3_size = 0, | 575 | .dda3_inc = 0, .dda3_size = 0, |
576 | .sc_reset = TV_SC_RESET_EVERY_4, | 576 | .sc_reset = TV_SC_RESET_EVERY_8, |
577 | .pal_burst = false, | 577 | .pal_burst = true, |
578 | 578 | ||
579 | .composite_levels = &pal_m_levels_composite, | 579 | .composite_levels = &pal_m_levels_composite, |
580 | .composite_color = &pal_m_csc_composite, | 580 | .composite_color = &pal_m_csc_composite, |
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = { | |||
586 | { | 586 | { |
587 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ | 587 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ |
588 | .name = "PAL-N", | 588 | .name = "PAL-N", |
589 | .clock = 107520, | 589 | .clock = 108000, |
590 | .refresh = 25000, | 590 | .refresh = 25000, |
591 | .oversample = TV_OVERSAMPLE_8X, | 591 | .oversample = TV_OVERSAMPLE_8X, |
592 | .component_only = 0, | 592 | .component_only = 0, |
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = { | |||
615 | 615 | ||
616 | 616 | ||
617 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | 617 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ |
618 | .dda1_inc = 168, | 618 | .dda1_inc = 135, |
619 | .dda2_inc = 18557, .dda2_size = 20625, | 619 | .dda2_inc = 23578, .dda2_size = 27648, |
620 | .dda3_inc = 0, .dda3_size = 0, | 620 | .dda3_inc = 134, .dda3_size = 625, |
621 | .sc_reset = TV_SC_RESET_EVERY_8, | 621 | .sc_reset = TV_SC_RESET_EVERY_8, |
622 | .pal_burst = true, | 622 | .pal_burst = true, |
623 | 623 | ||
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = { | |||
631 | { | 631 | { |
632 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ | 632 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ |
633 | .name = "PAL", | 633 | .name = "PAL", |
634 | .clock = 107520, | 634 | .clock = 108000, |
635 | .refresh = 25000, | 635 | .refresh = 25000, |
636 | .oversample = TV_OVERSAMPLE_8X, | 636 | .oversample = TV_OVERSAMPLE_8X, |
637 | .component_only = 0, | 637 | .component_only = 0, |
638 | 638 | ||
639 | .hsync_end = 64, .hblank_end = 128, | 639 | .hsync_end = 64, .hblank_end = 142, |
640 | .hblank_start = 844, .htotal = 863, | 640 | .hblank_start = 844, .htotal = 863, |
641 | 641 | ||
642 | .progressive = false, .trilevel_sync = false, | 642 | .progressive = false, .trilevel_sync = false, |
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = { | |||
659 | 659 | ||
660 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | 660 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ |
661 | .dda1_inc = 168, | 661 | .dda1_inc = 168, |
662 | .dda2_inc = 18557, .dda2_size = 20625, | 662 | .dda2_inc = 4122, .dda2_size = 27648, |
663 | .dda3_inc = 0, .dda3_size = 0, | 663 | .dda3_inc = 67, .dda3_size = 625, |
664 | .sc_reset = TV_SC_RESET_EVERY_8, | 664 | .sc_reset = TV_SC_RESET_EVERY_8, |
665 | .pal_burst = true, | 665 | .pal_burst = true, |
666 | 666 | ||
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = { | |||
689 | .veq_ena = false, | 689 | .veq_ena = false, |
690 | 690 | ||
691 | .vi_end_f1 = 44, .vi_end_f2 = 44, | 691 | .vi_end_f1 = 44, .vi_end_f2 = 44, |
692 | .nbr_end = 496, | 692 | .nbr_end = 479, |
693 | 693 | ||
694 | .burst_ena = false, | 694 | .burst_ena = false, |
695 | 695 | ||
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = { | |||
713 | .veq_ena = false, | 713 | .veq_ena = false, |
714 | 714 | ||
715 | .vi_end_f1 = 44, .vi_end_f2 = 44, | 715 | .vi_end_f1 = 44, .vi_end_f2 = 44, |
716 | .nbr_end = 496, | 716 | .nbr_end = 479, |
717 | 717 | ||
718 | .burst_ena = false, | 718 | .burst_ena = false, |
719 | 719 | ||
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = { | |||
876 | .component_only = 1, | 876 | .component_only = 1, |
877 | 877 | ||
878 | .hsync_end = 88, .hblank_end = 235, | 878 | .hsync_end = 88, .hblank_end = 235, |
879 | .hblank_start = 2155, .htotal = 2200, | 879 | .hblank_start = 2155, .htotal = 2201, |
880 | 880 | ||
881 | .progressive = false, .trilevel_sync = true, | 881 | .progressive = false, .trilevel_sync = true, |
882 | 882 | ||
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo | |||
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); |
1083 | 1083 | ||
1084 | /* Ensure TV refresh is close to desired refresh */ | 1084 | /* Ensure TV refresh is close to desired refresh */ |
1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) | 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10) |
1086 | return MODE_OK; | 1086 | return MODE_OK; |
1087 | return MODE_CLOCK_RANGE; | 1087 | return MODE_CLOCK_RANGE; |
1088 | } | 1088 | } |
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1135 | if (!tv_mode) | 1135 | if (!tv_mode) |
1136 | return; /* can't happen (mode_prepare prevents this) */ | 1136 | return; /* can't happen (mode_prepare prevents this) */ |
1137 | 1137 | ||
1138 | tv_ctl = 0; | 1138 | tv_ctl = I915_READ(TV_CTL); |
1139 | tv_ctl &= TV_CTL_SAVE; | ||
1139 | 1140 | ||
1140 | switch (tv_priv->type) { | 1141 | switch (tv_priv->type) { |
1141 | default: | 1142 | default: |
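Instead of rebuilding TV_CTL from zero, mode set now starts from the current register value masked with TV_CTL_SAVE, a read-modify-write that preserves selected hardware state bits across the mode set where the old tv_ctl = 0 clobbered them. The pattern in miniature, with a made-up mask and register value:

#include <stdio.h>

#define TV_CTL_SAVE 0x00408000u	/* illustrative mask, not the real bits */

int main(void)
{
	unsigned int reg = 0x12448001u;		 /* pretend current TV_CTL */
	unsigned int tv_ctl = reg & TV_CTL_SAVE; /* keep only saved bits */

	tv_ctl |= 0x00000003u;			 /* then OR in new mode bits */
	printf("tv_ctl=0x%08x\n", tv_ctl);
	return 0;
}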
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1215 | /* dda1 implies valid video levels */ | 1216 | /* dda1 implies valid video levels */ |
1216 | if (tv_mode->dda1_inc) { | 1217 | if (tv_mode->dda1_inc) { |
1217 | scctl1 |= TV_SC_DDA1_EN; | 1218 | scctl1 |= TV_SC_DDA1_EN; |
1218 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | if (tv_mode->dda2_inc) | 1221 | if (tv_mode->dda2_inc) |
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1225 | scctl1 |= TV_SC_DDA3_EN; | 1225 | scctl1 |= TV_SC_DDA3_EN; |
1226 | 1226 | ||
1227 | scctl1 |= tv_mode->sc_reset; | 1227 | scctl1 |= tv_mode->sc_reset; |
1228 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
1228 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; | 1229 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; |
1229 | 1230 | ||
1230 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | | 1231 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1266 | color_conversion->av); | 1267 | color_conversion->av); |
1267 | } | 1268 | } |
1268 | 1269 | ||
1269 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); | 1270 | if (IS_I965G(dev)) |
1271 | I915_WRITE(TV_CLR_KNOBS, 0x00404000); | ||
1272 | else | ||
1273 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); | ||
1274 | |||
1270 | if (video_levels) | 1275 | if (video_levels) |
1271 | I915_WRITE(TV_CLR_LEVEL, | 1276 | I915_WRITE(TV_CLR_LEVEL, |
1272 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | | 1277 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1401 | tv_dac = I915_READ(TV_DAC); | 1406 | tv_dac = I915_READ(TV_DAC); |
1402 | I915_WRITE(TV_DAC, save_tv_dac); | 1407 | I915_WRITE(TV_DAC, save_tv_dac); |
1403 | I915_WRITE(TV_CTL, save_tv_ctl); | 1408 | I915_WRITE(TV_CTL, save_tv_ctl); |
1409 | intel_wait_for_vblank(dev); | ||
1404 | } | 1410 | } |
1405 | /* | 1411 | /* |
1406 | * A B C | 1412 | * A B C |
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector) | |||
1451 | mode = reported_modes[0]; | 1457 | mode = reported_modes[0]; |
1452 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1458 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1453 | 1459 | ||
1454 | if (encoder->crtc) { | 1460 | if (encoder->crtc && encoder->crtc->enabled) { |
1455 | type = intel_tv_detect_type(encoder->crtc, intel_output); | 1461 | type = intel_tv_detect_type(encoder->crtc, intel_output); |
1456 | } else { | 1462 | } else { |
1457 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); | 1463 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); |
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector) | |||
1462 | type = -1; | 1468 | type = -1; |
1463 | } | 1469 | } |
1464 | 1470 | ||
1471 | tv_priv->type = type; | ||
1472 | |||
1465 | if (type < 0) | 1473 | if (type < 0) |
1466 | return connector_status_disconnected; | 1474 | return connector_status_disconnected; |
1467 | 1475 | ||
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1495 | struct drm_display_mode *mode_ptr; | 1503 | struct drm_display_mode *mode_ptr; |
1496 | struct intel_output *intel_output = to_intel_output(connector); | 1504 | struct intel_output *intel_output = to_intel_output(connector); |
1497 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1505 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); |
1498 | int j; | 1506 | int j, count = 0; |
1507 | u64 tmp; | ||
1499 | 1508 | ||
1500 | for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); | 1509 | for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); |
1501 | j++) { | 1510 | j++) { |
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1510 | && !tv_mode->component_only)) | 1519 | && !tv_mode->component_only)) |
1511 | continue; | 1520 | continue; |
1512 | 1521 | ||
1513 | mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), | 1522 | mode_ptr = drm_mode_create(connector->dev); |
1514 | DRM_MEM_DRIVER); | 1523 | if (!mode_ptr) |
1524 | continue; | ||
1515 | strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); | 1525 | strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); |
1516 | 1526 | ||
1517 | mode_ptr->hdisplay = hactive_s; | 1527 | mode_ptr->hdisplay = hactive_s; |
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1528 | mode_ptr->vsync_end = mode_ptr->vsync_start + 1; | 1538 | mode_ptr->vsync_end = mode_ptr->vsync_start + 1; |
1529 | mode_ptr->vtotal = vactive_s + 33; | 1539 | mode_ptr->vtotal = vactive_s + 33; |
1530 | 1540 | ||
1531 | mode_ptr->clock = (int) (tv_mode->refresh * | 1541 | tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; |
1532 | mode_ptr->vtotal * | 1542 | tmp *= mode_ptr->htotal; |
1533 | mode_ptr->htotal / 1000) / 1000; | 1543 | tmp = div_u64(tmp, 1000000); |
1544 | mode_ptr->clock = (int) tmp; | ||
1534 | 1545 | ||
1535 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; | 1546 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; |
1536 | drm_mode_probed_add(connector, mode_ptr); | 1547 | drm_mode_probed_add(connector, mode_ptr); |
1548 | count++; | ||
1537 | } | 1549 | } |
1538 | 1550 | ||
1539 | return 0; | 1551 | return count; |
1540 | } | 1552 | } |
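The old clock computation multiplied three ints before dividing, which overflows 32 bits at these mode sizes: 29970 * 525 * 858 ≈ 1.35e10, well past INT_MAX, so the reported pixel clock came out garbage. The fix widens to u64 before multiplying and divides by 10^6 once via div_u64() (refresh is stored in millihertz, clock in kHz); the same hunk also makes get_modes return the number of probed modes instead of 0, as the DRM core expects. A userspace rendition of the overflow and the fix, with hypothetical NTSC-ish totals:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int refresh = 29970;		/* millihertz, per the tv_modes table */
	int vtotal = 525, htotal = 858;	/* hypothetical NTSC-ish totals */

	/* Old math, emulated with unsigned wraparound: the 32-bit product
	 * of 29970 * 525 * 858 (~1.35e10) wraps before the divisions. */
	uint32_t wrapped = (uint32_t)refresh * vtotal * htotal;
	int old_clock = (int)(wrapped / 1000) / 1000;

	/* New math: widen to 64 bits first, divide by 10^6 once. */
	uint64_t tmp = (uint64_t)refresh * vtotal;
	tmp *= htotal;
	int new_clock = (int)(tmp / 1000000);	/* div_u64() in the kernel */

	printf("old (wrapped): %d kHz, new: %d kHz\n", old_clock, new_clock);
	return 0;
}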
1541 | 1553 | ||
1542 | static void | 1554 | static void |