Diffstat (limited to 'drivers/gpu/drm'):

 drivers/gpu/drm/i915/i915_debugfs.c        | 241
 drivers/gpu/drm/i915/i915_dma.c            |   2
 drivers/gpu/drm/i915/i915_drv.h            |  53
 drivers/gpu/drm/i915/i915_gem.c            | 394
 drivers/gpu/drm/i915/i915_gem_evict.c      |   2
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 150
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   2
 drivers/gpu/drm/i915/i915_gem_tiling.c     |   4
 drivers/gpu/drm/i915/i915_irq.c            | 116
 drivers/gpu/drm/i915/i915_reg.h            |  30
 drivers/gpu/drm/i915/intel_display.c       |  57
 drivers/gpu/drm/i915/intel_drv.h           |   1
 drivers/gpu/drm/i915/intel_overlay.c       |   6
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  97
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  20
 drivers/gpu/drm/i915/intel_sprite.c        |   4
 16 files changed, 575 insertions(+), 604 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9717bf42f846..11ae06aac1f1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -564,45 +564,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void i915_dump_object(struct seq_file *m,
-			     struct io_mapping *mapping,
-			     struct drm_i915_gem_object *obj)
-{
-	int page, page_count, i;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (page = 0; page < page_count; page++) {
-		u32 *mem = io_mapping_map_wc(mapping,
-					     obj->gtt_offset + page * PAGE_SIZE);
-		for (i = 0; i < PAGE_SIZE; i += 4)
-			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		io_mapping_unmap(mem);
-	}
-}
-
-static int i915_batchbuffer_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
-			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
-		}
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
-
 static int i915_ringbuffer_data(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -669,9 +630,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 static const char *ring_str(int ring)
 {
 	switch (ring) {
-	case RING_RENDER: return " render";
-	case RING_BSD: return " bsd";
-	case RING_BLT: return " blt";
+	case RCS: return "render";
+	case VCS: return "bsd";
+	case BCS: return "blt";
 	default: return "";
 	}
 }
@@ -714,7 +675,7 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -724,6 +685,7 @@ static void print_error_buffers(struct seq_file *m,
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
 			   purgeable_flag(err->purgeable),
+			   err->ring != -1 ? " " : "",
 			   ring_str(err->ring),
 			   cache_level_str(err->cache_level));
 
@@ -737,6 +699,32 @@ static void print_error_buffers(struct seq_file *m,
 	}
 }
 
+static void i915_ring_error_state(struct seq_file *m,
+				  struct drm_device *dev,
+				  struct drm_i915_error_state *error,
+				  unsigned ring)
+{
+	seq_printf(m, "%s command stream:\n", ring_str(ring));
+	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
+	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
+	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
+	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
+	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
+		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
+	}
+	if (INTEL_INFO(dev)->gen >= 4)
+		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+	}
+	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -759,35 +747,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++)
+		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, "ERROR: 0x%08x\n", error->error);
-		seq_printf(m, "Blitter command stream:\n");
-		seq_printf(m, "  ACTHD: 0x%08x\n", error->bcs_acthd);
-		seq_printf(m, "  IPEIR: 0x%08x\n", error->bcs_ipeir);
-		seq_printf(m, "  IPEHR: 0x%08x\n", error->bcs_ipehr);
-		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
-		seq_printf(m, "  seqno: 0x%08x\n", error->bcs_seqno);
-		seq_printf(m, "Video (BSD) command stream:\n");
-		seq_printf(m, "  ACTHD: 0x%08x\n", error->vcs_acthd);
-		seq_printf(m, "  IPEIR: 0x%08x\n", error->vcs_ipeir);
-		seq_printf(m, "  IPEHR: 0x%08x\n", error->vcs_ipehr);
-		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
-		seq_printf(m, "  seqno: 0x%08x\n", error->vcs_seqno);
+		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
-	seq_printf(m, "Render command stream:\n");
-	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
-	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
-	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
-		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
-	}
-	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
-	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
 
-	for (i = 0; i < dev_priv->num_fence_regs; i++)
-		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+	i915_ring_error_state(m, dev, error, RCS);
+	if (HAS_BLT(dev))
+		i915_ring_error_state(m, dev, error, BCS);
+	if (HAS_BSD(dev))
+		i915_ring_error_state(m, dev, error, VCS);
 
 	if (error->active_bo)
 		print_error_buffers(m, "Active",
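
The error-state refactor above replaces one hand-rolled seq_printf block per
engine with per-ring arrays indexed by ring id (RCS, VCS, BCS) plus the single
i915_ring_error_state() helper. A minimal sketch of what the array layout buys
a consumer (hypothetical function, not part of the patch):

	static void dump_all_ipehr(struct seq_file *m,
				   struct drm_i915_error_state *error)
	{
		int i;

		/* every per-ring register is now error-><reg>[ring] */
		for (i = 0; i < I915_NUM_RINGS; i++)
			seq_printf(m, "IPEHR[%d]: 0x%08x\n", i, error->ipehr[i]);
	}
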
@@ -1415,9 +1388,58 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static const char *swizzle_string(unsigned swizzle)
+{
+	switch(swizzle) {
+	case I915_BIT_6_SWIZZLE_NONE:
+		return "none";
+	case I915_BIT_6_SWIZZLE_9:
+		return "bit9";
+	case I915_BIT_6_SWIZZLE_9_10:
+		return "bit9/bit10";
+	case I915_BIT_6_SWIZZLE_9_11:
+		return "bit9/bit11";
+	case I915_BIT_6_SWIZZLE_9_10_11:
+		return "bit9/bit10/bit11";
+	case I915_BIT_6_SWIZZLE_9_17:
+		return "bit9/bit17";
+	case I915_BIT_6_SWIZZLE_9_10_17:
+		return "bit9/bit10/bit17";
+	case I915_BIT_6_SWIZZLE_UNKNOWN:
+		return "unknown";
+	}
+
+	return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+	if (IS_GEN3(dev) || IS_GEN4(dev)) {
+		seq_printf(m, "DCC = 0x%08x\n",
+			   I915_READ(DCC));
+		seq_printf(m, "C0DRB3 = 0x%04x\n",
+			   I915_READ16(C0DRB3));
+		seq_printf(m, "C1DRB3 = 0x%04x\n",
+			   I915_READ16(C1DRB3));
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int
-i915_wedged_open(struct inode *inode,
+i915_debugfs_common_open(struct inode *inode,
 		 struct file *filp)
 {
 	filp->private_data = inode->i_private;
 	return 0;
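
For context on the strings printed by swizzle_string(): a mode such as
"bit9/bit10" means the memory controller XORs the named address bits into
bit 6 of a tiled address. An illustrative computation under that reading
(hypothetical helper, not code from the patch):

	/* bit 6 is flipped by the parity of address bits 9 and 10 */
	static u32 swizzle_addr_bit6_9_10(u32 addr)
	{
		return addr ^ ((((addr >> 9) ^ (addr >> 10)) & 1) << 6);
	}
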
@@ -1473,20 +1495,12 @@ i915_wedged_write(struct file *filp,
 
 static const struct file_operations i915_wedged_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_wedged_open,
+	.open = i915_debugfs_common_open,
 	.read = i915_wedged_read,
 	.write = i915_wedged_write,
 	.llseek = default_llseek,
 };
 
-static int
-i915_max_freq_open(struct inode *inode,
-		   struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t
 i915_max_freq_read(struct file *filp,
 		   char __user *ubuf,
@@ -1543,20 +1557,12 @@ i915_max_freq_write(struct file *filp,
 
 static const struct file_operations i915_max_freq_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_max_freq_open,
+	.open = i915_debugfs_common_open,
 	.read = i915_max_freq_read,
 	.write = i915_max_freq_write,
 	.llseek = default_llseek,
 };
 
-static int
-i915_cache_sharing_open(struct inode *inode,
-			struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t
 i915_cache_sharing_read(struct file *filp,
 			char __user *ubuf,
@@ -1622,7 +1628,7 @@ i915_cache_sharing_write(struct file *filp,
 
 static const struct file_operations i915_cache_sharing_fops = {
 	.owner = THIS_MODULE,
-	.open = i915_cache_sharing_open,
+	.open = i915_debugfs_common_open,
 	.read = i915_cache_sharing_read,
 	.write = i915_cache_sharing_write,
 	.llseek = default_llseek,
@@ -1654,21 +1660,6 @@ drm_add_fake_info_node(struct drm_minor *minor,
 	return 0;
 }
 
-static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
-{
-	struct drm_device *dev = minor->dev;
-	struct dentry *ent;
-
-	ent = debugfs_create_file("i915_wedged",
-				  S_IRUGO | S_IWUSR,
-				  root, dev,
-				  &i915_wedged_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
-
-	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -1730,34 +1721,22 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 }
 
-static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
-{
-	struct drm_device *dev = minor->dev;
-	struct dentry *ent;
-
-	ent = debugfs_create_file("i915_max_freq",
-				  S_IRUGO | S_IWUSR,
-				  root, dev,
-				  &i915_max_freq_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
-
-	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
-}
-
-static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+static int i915_debugfs_create(struct dentry *root,
+			       struct drm_minor *minor,
+			       const char *name,
+			       const struct file_operations *fops)
 {
 	struct drm_device *dev = minor->dev;
 	struct dentry *ent;
 
-	ent = debugfs_create_file("i915_cache_sharing",
+	ent = debugfs_create_file(name,
 				  S_IRUGO | S_IWUSR,
 				  root, dev,
-				  &i915_cache_sharing_fops);
+				  fops);
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
 
-	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+	return drm_add_fake_info_node(minor, ent, fops);
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
@@ -1783,7 +1762,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
 	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
 	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
-	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
@@ -1799,6 +1777,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 	{"i915_context_status", i915_context_status, 0},
 	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+	{"i915_swizzle_info", i915_swizzle_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -1806,17 +1785,25 @@ int i915_debugfs_init(struct drm_minor *minor)
 {
 	int ret;
 
-	ret = i915_wedged_create(minor->debugfs_root, minor);
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_wedged",
+				  &i915_wedged_fops);
 	if (ret)
 		return ret;
 
 	ret = i915_forcewake_create(minor->debugfs_root, minor);
 	if (ret)
 		return ret;
-	ret = i915_max_freq_create(minor->debugfs_root, minor);
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_max_freq",
+				  &i915_max_freq_fops);
 	if (ret)
 		return ret;
-	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_cache_sharing",
+				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
 
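
With the consolidated helper, registering a further debugfs file is a single
call; a sketch with a hypothetical name and fops:

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_example", &i915_example_fops);
	if (ret)
		return ret;
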
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 448848cbc1db..8919dcc07ed8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2132,7 +2132,7 @@ int i915_driver_unload(struct drm_device *dev)
 	unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 32737a37edd0..68a5338ea867 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -135,6 +135,7 @@ struct drm_i915_fence_reg {
 	struct list_head lru_list;
 	struct drm_i915_gem_object *obj;
 	uint32_t setup_seqno;
+	int pin_count;
 };
 
 struct sdvo_device_mapping {
@@ -152,26 +153,21 @@ struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
 	u32 pipestat[I915_MAX_PIPES];
-	u32 ipeir;
-	u32 ipehr;
-	u32 instdone;
-	u32 acthd;
+	u32 tail[I915_NUM_RINGS];
+	u32 head[I915_NUM_RINGS];
+	u32 ipeir[I915_NUM_RINGS];
+	u32 ipehr[I915_NUM_RINGS];
+	u32 instdone[I915_NUM_RINGS];
+	u32 acthd[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
-	u32 bcs_acthd; /* gen6+ blt engine */
-	u32 bcs_ipehr;
-	u32 bcs_ipeir;
-	u32 bcs_instdone;
-	u32 bcs_seqno;
-	u32 vcs_acthd; /* gen6+ bsd engine */
-	u32 vcs_ipehr;
-	u32 vcs_ipeir;
-	u32 vcs_instdone;
-	u32 vcs_seqno;
-	u32 instpm;
-	u32 instps;
+	u32 instpm[I915_NUM_RINGS];
+	u32 instps[I915_NUM_RINGS];
 	u32 instdone1;
-	u32 seqno;
+	u32 seqno[I915_NUM_RINGS];
 	u64 bbaddr;
+	u32 fault_reg[I915_NUM_RINGS];
+	u32 done_reg;
+	u32 faddr[I915_NUM_RINGS];
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct timeval time;
 	struct drm_i915_error_object {
@@ -1170,6 +1166,24 @@ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 						struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+	}
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
+
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
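
The new pin_count field and these inline helpers let a caller mark a fence
register as in active use so that i915_find_fence_reg() (changed below in
i915_gem.c) will not steal it. The intended pairing, sketched for a
hypothetical caller:

	ret = i915_gem_object_get_fence(obj, ring);
	if (ret == 0) {
		i915_gem_object_pin_fence(obj);
		/* ... the fence backing this object may not be stolen ... */
		i915_gem_object_unpin_fence(obj);
	}
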
@@ -1183,13 +1197,14 @@ void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long start,
 		      unsigned long mappable_end,
 		      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno);
+				   uint32_t seqno,
+				   bool do_retire);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eb98a7f55cfe..51a2b0c2a30d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
 				    struct shrink_control *sc);
+static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -258,73 +259,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-static inline void
-slow_shmem_copy(struct page *dst_page,
-		int dst_offset,
-		struct page *src_page,
-		int src_offset,
-		int length)
-{
-	char *dst_vaddr, *src_vaddr;
-
-	dst_vaddr = kmap(dst_page);
-	src_vaddr = kmap(src_page);
-
-	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
-
-	kunmap(src_page);
-	kunmap(dst_page);
-}
-
-static inline void
-slow_shmem_bit17_copy(struct page *gpu_page,
-		      int gpu_offset,
-		      struct page *cpu_page,
-		      int cpu_offset,
-		      int length,
-		      int is_read)
-{
-	char *gpu_vaddr, *cpu_vaddr;
-
-	/* Use the unswizzled path if this page isn't affected. */
-	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
-		if (is_read)
-			return slow_shmem_copy(cpu_page, cpu_offset,
-					       gpu_page, gpu_offset, length);
-		else
-			return slow_shmem_copy(gpu_page, gpu_offset,
-					       cpu_page, cpu_offset, length);
-	}
-
-	gpu_vaddr = kmap(gpu_page);
-	cpu_vaddr = kmap(cpu_page);
-
-	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
-	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
-	 */
-	while (length > 0) {
-		int cacheline_end = ALIGN(gpu_offset + 1, 64);
-		int this_length = min(cacheline_end - gpu_offset, length);
-		int swizzled_gpu_offset = gpu_offset ^ 64;
-
-		if (is_read) {
-			memcpy(cpu_vaddr + cpu_offset,
-			       gpu_vaddr + swizzled_gpu_offset,
-			       this_length);
-		} else {
-			memcpy(gpu_vaddr + swizzled_gpu_offset,
-			       cpu_vaddr + cpu_offset,
-			       this_length);
-		}
-		cpu_offset += this_length;
-		gpu_offset += this_length;
-		length -= this_length;
-	}
-
-	kunmap(cpu_page);
-	kunmap(gpu_page);
-}
-
 /**
  * This is the fast shmem pread path, which attempts to copy_from_user directly
  * from the backing pages of the object to the user's address space. On a
@@ -385,6 +319,58 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 	return 0;
 }
 
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+			const char *gpu_vaddr, int gpu_offset,
+			int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_to_user(cpu_vaddr + cpu_offset,
+				     gpu_vaddr + swizzled_gpu_offset,
+				     this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
+			  const char *cpu_vaddr,
+			  int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+				       cpu_vaddr + cpu_offset,
+				       this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
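
The XOR with 64 in both helpers above swaps adjacent 64-byte cachelines: on
affected machines, bit 17 of a page's physical address is folded into bit 6
of the offset within that page, so the copy proceeds cacheline by cacheline
with the GPU-side offset corrected. A worked example under that assumption:

	int linear   = 0x30;		/* byte offset within the page */
	int swizzled = linear ^ 64;	/* 0x70: the partner cacheline */
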
@@ -398,72 +384,34 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 		       struct drm_file *file)
 {
 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	struct mm_struct *mm = current->mm;
-	struct page **user_pages;
+	char __user *user_data;
 	ssize_t remain;
-	loff_t offset, pinned_pages, i;
-	loff_t first_data_page, last_data_page, num_pages;
-	int shmem_page_offset;
-	int data_page_index, data_page_offset;
-	int page_length;
-	int ret;
-	uint64_t data_ptr = args->data_ptr;
-	int do_bit17_swizzling;
+	loff_t offset;
+	int shmem_page_offset, page_length, ret;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	/* Pin the user pages containing the data.  We can't fault while
-	 * holding the struct mutex, yet we want to hold it while
-	 * dereferencing the user data.
-	 */
-	first_data_page = data_ptr / PAGE_SIZE;
-	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
-	num_pages = last_data_page - first_data_page + 1;
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-	if (user_pages == NULL)
-		return -ENOMEM;
+	offset = args->offset;
 
 	mutex_unlock(&dev->struct_mutex);
-	down_read(&mm->mmap_sem);
-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 1, 0, user_pages, NULL);
-	up_read(&mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	if (pinned_pages < num_pages) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = i915_gem_object_set_cpu_read_domain_range(obj,
-							args->offset,
-							args->size);
-	if (ret)
-		goto out;
-
-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-	offset = args->offset;
 
 	while (remain > 0) {
 		struct page *page;
+		char *vaddr;
 
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
-		 * data_page_index = page number in get_user_pages return
-		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
 		shmem_page_offset = offset_in_page(offset);
-		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = offset_in_page(data_ptr);
-
 		page_length = remain;
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
-		if ((data_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - data_page_offset;
 
 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page)) {
@@ -471,36 +419,38 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 			goto out;
 		}
 
-		if (do_bit17_swizzling) {
-			slow_shmem_bit17_copy(page,
-					      shmem_page_offset,
-					      user_pages[data_page_index],
-					      data_page_offset,
-					      page_length,
-					      1);
-		} else {
-			slow_shmem_copy(user_pages[data_page_index],
-					data_page_offset,
-					page,
-					shmem_page_offset,
-					page_length);
-		}
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		vaddr = kmap(page);
+		if (page_do_bit17_swizzling)
+			ret = __copy_to_user_swizzled(user_data,
+						      vaddr, shmem_page_offset,
+						      page_length);
+		else
+			ret = __copy_to_user(user_data,
+					     vaddr + shmem_page_offset,
+					     page_length);
+		kunmap(page);
 
 		mark_page_accessed(page);
 		page_cache_release(page);
 
+		if (ret) {
+			ret = -EFAULT;
+			goto out;
+		}
+
 		remain -= page_length;
-		data_ptr += page_length;
+		user_data += page_length;
 		offset += page_length;
 	}
 
 out:
-	for (i = 0; i < pinned_pages; i++) {
-		SetPageDirty(user_pages[i]);
-		mark_page_accessed(user_pages[i]);
-		page_cache_release(user_pages[i]);
-	}
-	drm_free_large(user_pages);
+	mutex_lock(&dev->struct_mutex);
+	/* Fixup: Kill any reinstated backing storage pages */
+	if (obj->madv == __I915_MADV_PURGED)
+		i915_gem_object_truncate(obj);
 
 	return ret;
 }
@@ -841,71 +791,36 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 			   struct drm_file *file)
 {
 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	struct mm_struct *mm = current->mm;
-	struct page **user_pages;
 	ssize_t remain;
-	loff_t offset, pinned_pages, i;
-	loff_t first_data_page, last_data_page, num_pages;
-	int shmem_page_offset;
-	int data_page_index, data_page_offset;
-	int page_length;
-	int ret;
-	uint64_t data_ptr = args->data_ptr;
-	int do_bit17_swizzling;
+	loff_t offset;
+	char __user *user_data;
+	int shmem_page_offset, page_length, ret;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	/* Pin the user pages containing the data.  We can't fault while
-	 * holding the struct mutex, and all of the pwrite implementations
-	 * want to hold it while dereferencing the user data.
-	 */
-	first_data_page = data_ptr / PAGE_SIZE;
-	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
-	num_pages = last_data_page - first_data_page + 1;
-
-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-	if (user_pages == NULL)
-		return -ENOMEM;
-
-	mutex_unlock(&dev->struct_mutex);
-	down_read(&mm->mmap_sem);
-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 0, 0, user_pages, NULL);
-	up_read(&mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	if (pinned_pages < num_pages) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret)
-		goto out;
-
-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
 	offset = args->offset;
 	obj->dirty = 1;
 
+	mutex_unlock(&dev->struct_mutex);
+
 	while (remain > 0) {
 		struct page *page;
+		char *vaddr;
 
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
-		 * data_page_index = page number in get_user_pages return
-		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
 		shmem_page_offset = offset_in_page(offset);
-		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = offset_in_page(data_ptr);
 
 		page_length = remain;
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
-		if ((data_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - data_page_offset;
 
 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page)) {
@@ -913,34 +828,45 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 			goto out;
 		}
 
-		if (do_bit17_swizzling) {
-			slow_shmem_bit17_copy(page,
-					      shmem_page_offset,
-					      user_pages[data_page_index],
-					      data_page_offset,
-					      page_length,
-					      0);
-		} else {
-			slow_shmem_copy(page,
-					shmem_page_offset,
-					user_pages[data_page_index],
-					data_page_offset,
-					page_length);
-		}
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		vaddr = kmap(page);
+		if (page_do_bit17_swizzling)
+			ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+							user_data,
+							page_length);
+		else
+			ret = __copy_from_user(vaddr + shmem_page_offset,
+					       user_data,
+					       page_length);
+		kunmap(page);
 
 		set_page_dirty(page);
 		mark_page_accessed(page);
 		page_cache_release(page);
 
+		if (ret) {
+			ret = -EFAULT;
+			goto out;
+		}
+
 		remain -= page_length;
-		data_ptr += page_length;
+		user_data += page_length;
 		offset += page_length;
 	}
 
 out:
-	for (i = 0; i < pinned_pages; i++)
-		page_cache_release(user_pages[i]);
-	drm_free_large(user_pages);
+	mutex_lock(&dev->struct_mutex);
+	/* Fixup: Kill any reinstated backing storage pages */
+	if (obj->madv == __I915_MADV_PURGED)
+		i915_gem_object_truncate(obj);
+	/* and flush dirty cachelines in case the object isn't in the cpu write
+	 * domain anymore. */
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		i915_gem_clflush_object(obj);
+		intel_gtt_chipset_flush();
+	}
 
 	return ret;
 }
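
Both reworked slow paths share one shape: struct_mutex is dropped for the
whole copy loop, because __copy_to_user()/__copy_from_user() may fault and
sleep, and re-taken at out: to repair anything userspace did in the meantime;
that is what the __I915_MADV_PURGED truncate fixup is for. Schematically:

	mutex_unlock(&dev->struct_mutex);
	while (remain > 0) {
		/* kmap() + __copy_{to,from}_user() one shmem page at a time */
	}
	mutex_lock(&dev->struct_mutex);
	/* redo any checks that may have been invalidated while unlocked */
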
@@ -996,10 +922,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj)
+	if (obj->phys_obj) {
 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
-	else if (obj->gtt_space &&
-		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		goto out;
+	}
+
+	if (obj->gtt_space &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_object_pin(obj, 0, true);
 		if (ret)
 			goto out;
@@ -1018,18 +947,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 out_unpin:
 		i915_gem_object_unpin(obj);
-	} else {
-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-		if (ret)
-			goto out;
 
-		ret = -EFAULT;
-		if (!i915_gem_object_needs_bit17_swizzle(obj))
-			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
-		if (ret == -EFAULT)
-			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+		if (ret != -EFAULT)
+			goto out;
+		/* Fall through to the shmfs paths because the gtt paths might
+		 * fail with non-page-backed user pointers (e.g. gtt mappings
+		 * when moving data between textures). */
 	}
 
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret)
+		goto out;
+
+	ret = -EFAULT;
+	if (!i915_gem_object_needs_bit17_swizzle(obj))
+		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+	if (ret == -EFAULT)
+		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -1141,7 +1076,6 @@ int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
 	unsigned long addr;
@@ -1153,11 +1087,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
-	if (obj->size > dev_priv->mm.gtt_mappable_end) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -E2BIG;
-	}
-
 	down_write(&current->mm->mmap_sem);
 	addr = do_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1943,7 +1872,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+		  uint32_t seqno,
+		  bool do_retire)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -2027,7 +1957,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
 	 */
-	if (ret == 0)
+	if (ret == 0 && do_retire)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;
@@ -2051,7 +1981,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+					true);
 		if (ret)
 			return ret;
 	}
@@ -2172,7 +2103,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 {
 	int ret;
 
@@ -2186,18 +2117,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 		return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+				 do_retire);
 }
 
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
 		if (ret)
 			return ret;
 	}
@@ -2400,7 +2331,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
 			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno);
+						obj->last_fenced_seqno,
+						true);
 			if (ret)
 				return ret;
 		}
@@ -2432,6 +2364,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
 		i915_gem_clear_fence_reg(obj->base.dev,
 					 &dev_priv->fence_regs[obj->fence_reg]);
 
@@ -2456,7 +2390,7 @@ i915_find_fence_reg(struct drm_device *dev,
 		if (!reg->obj)
 			return reg;
 
-		if (!reg->obj->pin_count)
+		if (!reg->pin_count)
 			avail = reg;
 	}
 
@@ -2466,7 +2400,7 @@ i915_find_fence_reg(struct drm_device *dev,
 	/* None available, try to steal one or wait for a user to finish */
 	avail = first = NULL;
 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-		if (reg->obj->pin_count)
+		if (reg->pin_count)
 			continue;
 
 		if (first == NULL)
@@ -2541,7 +2475,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			if (!ring_passed_seqno(obj->last_fenced_ring,
 					       reg->setup_seqno)) {
 				ret = i915_wait_request(obj->last_fenced_ring,
-							reg->setup_seqno);
+							reg->setup_seqno,
+							true);
 				if (ret)
 					return ret;
 			}
@@ -2560,7 +2495,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 
 	reg = i915_find_fence_reg(dev, pipelined);
 	if (reg == NULL)
-		return -ENOSPC;
+		return -EDEADLK;
 
 	ret = i915_gem_object_flush_fence(obj, pipelined);
 	if (ret)
@@ -2660,6 +2595,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
 	list_del_init(&reg->lru_list);
 	reg->obj = NULL;
 	reg->setup_seqno = 0;
+	reg->pin_count = 0;
 }
 
 /**
@@ -3710,7 +3646,7 @@ i915_gem_idle(struct drm_device *dev)
 		return 0;
 	}
 
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -4201,7 +4137,7 @@ rescan:
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev) == 0)
+		if (i915_gpu_idle(dev, true) == 0)
 			goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ead5d00f91b0..097119caa36a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -195,7 +195,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	trace_i915_gem_evict_everything(dev, purgeable_only);
 
 	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 65e1f0043f9d..c719df19b3de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	cd->invalidate_domains |= invalidate_domains;
 	cd->flush_domains |= flush_domains;
 	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= obj->ring->id;
+		cd->flush_rings |= intel_ring_flag(obj->ring);
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= ring->id;
+		cd->flush_rings |= intel_ring_flag(ring);
 }
 
 struct eb_objects {
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			  reloc->write_domain);
 		return ret;
 	}
-	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
-		DRM_ERROR("reloc with read/write CPU domains: "
+	if (unlikely((reloc->write_domain | reloc->read_domains)
+		     & ~I915_GEM_GPU_DOMAINS)) {
+		DRM_ERROR("reloc with read/write non-GPU domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
 			  obj, reloc->target_handle,
@@ -461,6 +462,54 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
461 | return ret; | 462 | return ret; |
462 | } | 463 | } |
463 | 464 | ||
465 | #define __EXEC_OBJECT_HAS_FENCE (1<<31) | ||
466 | |||
467 | static int | ||
468 | pin_and_fence_object(struct drm_i915_gem_object *obj, | ||
469 | struct intel_ring_buffer *ring) | ||
470 | { | ||
471 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
472 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | ||
473 | bool need_fence, need_mappable; | ||
474 | int ret; | ||
475 | |||
476 | need_fence = | ||
477 | has_fenced_gpu_access && | ||
478 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
479 | obj->tiling_mode != I915_TILING_NONE; | ||
480 | need_mappable = | ||
481 | entry->relocation_count ? true : need_fence; | ||
482 | |||
483 | ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); | ||
484 | if (ret) | ||
485 | return ret; | ||
486 | |||
487 | if (has_fenced_gpu_access) { | ||
488 | if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { | ||
489 | if (obj->tiling_mode) { | ||
490 | ret = i915_gem_object_get_fence(obj, ring); | ||
491 | if (ret) | ||
492 | goto err_unpin; | ||
493 | |||
494 | entry->flags |= __EXEC_OBJECT_HAS_FENCE; | ||
495 | i915_gem_object_pin_fence(obj); | ||
496 | } else { | ||
497 | ret = i915_gem_object_put_fence(obj); | ||
498 | if (ret) | ||
499 | goto err_unpin; | ||
500 | } | ||
501 | } | ||
502 | obj->pending_fenced_gpu_access = need_fence; | ||
503 | } | ||
504 | |||
505 | entry->offset = obj->gtt_offset; | ||
506 | return 0; | ||
507 | |||
508 | err_unpin: | ||
509 | i915_gem_object_unpin(obj); | ||
510 | return ret; | ||
511 | } | ||
512 | |||
464 | static int | 513 | static int |
465 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | 514 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
466 | struct drm_file *file, | 515 | struct drm_file *file, |
@@ -518,6 +567,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
518 | list_for_each_entry(obj, objects, exec_list) { | 567 | list_for_each_entry(obj, objects, exec_list) { |
519 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | 568 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
520 | bool need_fence, need_mappable; | 569 | bool need_fence, need_mappable; |
570 | |||
521 | if (!obj->gtt_space) | 571 | if (!obj->gtt_space) |
522 | continue; | 572 | continue; |
523 | 573 | ||
@@ -532,58 +582,47 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
532 | (need_mappable && !obj->map_and_fenceable)) | 582 | (need_mappable && !obj->map_and_fenceable)) |
533 | ret = i915_gem_object_unbind(obj); | 583 | ret = i915_gem_object_unbind(obj); |
534 | else | 584 | else |
535 | ret = i915_gem_object_pin(obj, | 585 | ret = pin_and_fence_object(obj, ring); |
536 | entry->alignment, | ||
537 | need_mappable); | ||
538 | if (ret) | 586 | if (ret) |
539 | goto err; | 587 | goto err; |
540 | |||
541 | entry++; | ||
542 | } | 588 | } |
543 | 589 | ||
544 | /* Bind fresh objects */ | 590 | /* Bind fresh objects */ |
545 | list_for_each_entry(obj, objects, exec_list) { | 591 | list_for_each_entry(obj, objects, exec_list) { |
546 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | 592 | if (obj->gtt_space) |
547 | bool need_fence; | 593 | continue; |
548 | |||
549 | need_fence = | ||
550 | has_fenced_gpu_access && | ||
551 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
552 | obj->tiling_mode != I915_TILING_NONE; | ||
553 | |||
554 | if (!obj->gtt_space) { | ||
555 | bool need_mappable = | ||
556 | entry->relocation_count ? true : need_fence; | ||
557 | |||
558 | ret = i915_gem_object_pin(obj, | ||
559 | entry->alignment, | ||
560 | need_mappable); | ||
561 | if (ret) | ||
562 | break; | ||
563 | } | ||
564 | 594 | ||
565 | if (has_fenced_gpu_access) { | 595 | ret = pin_and_fence_object(obj, ring); |
566 | if (need_fence) { | 596 | if (ret) { |
567 | ret = i915_gem_object_get_fence(obj, ring); | 597 | int ret_ignore; |
568 | if (ret) | 598 | |
569 | break; | 599 | /* This can potentially raise a harmless |
570 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 600 | * -EINVAL if we failed to bind in the above |
571 | obj->tiling_mode == I915_TILING_NONE) { | 601 | * call. It cannot raise -EINTR since we know |
572 | /* XXX pipelined! */ | 602 | * that the bo is freshly bound and so will |
573 | ret = i915_gem_object_put_fence(obj); | 603 | * not need to be flushed or waited upon. |
574 | if (ret) | 604 | */ |
575 | break; | 605 | ret_ignore = i915_gem_object_unbind(obj); |
576 | } | 606 | (void)ret_ignore; |
577 | obj->pending_fenced_gpu_access = need_fence; | 607 | WARN_ON(obj->gtt_space); |
608 | break; | ||
578 | } | 609 | } |
579 | |||
580 | entry->offset = obj->gtt_offset; | ||
581 | } | 610 | } |
582 | 611 | ||
583 | /* Decrement pin count for bound objects */ | 612 | /* Decrement pin count for bound objects */ |
584 | list_for_each_entry(obj, objects, exec_list) { | 613 | list_for_each_entry(obj, objects, exec_list) { |
585 | if (obj->gtt_space) | 614 | struct drm_i915_gem_exec_object2 *entry; |
586 | i915_gem_object_unpin(obj); | 615 | |
616 | if (!obj->gtt_space) | ||
617 | continue; | ||
618 | |||
619 | entry = obj->exec_entry; | ||
620 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { | ||
621 | i915_gem_object_unpin_fence(obj); | ||
622 | entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; | ||
623 | } | ||
624 | |||
625 | i915_gem_object_unpin(obj); | ||
587 | } | 626 | } |
588 | 627 | ||
589 | if (ret != -ENOSPC || retry > 1) | 628 | if (ret != -ENOSPC || retry > 1) |
@@ -600,16 +639,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
600 | } while (1); | 639 | } while (1); |
601 | 640 | ||
602 | err: | 641 | err: |
603 | obj = list_entry(obj->exec_list.prev, | 642 | list_for_each_entry_continue_reverse(obj, objects, exec_list) { |
604 | struct drm_i915_gem_object, | 643 | struct drm_i915_gem_exec_object2 *entry; |
605 | exec_list); | 644 | |
606 | while (objects != &obj->exec_list) { | 645 | if (!obj->gtt_space) |
607 | if (obj->gtt_space) | 646 | continue; |
608 | i915_gem_object_unpin(obj); | 647 | |
648 | entry = obj->exec_entry; | ||
649 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { | ||
650 | i915_gem_object_unpin_fence(obj); | ||
651 | entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; | ||
652 | } | ||
609 | 653 | ||
610 | obj = list_entry(obj->exec_list.prev, | 654 | i915_gem_object_unpin(obj); |
611 | struct drm_i915_gem_object, | ||
612 | exec_list); | ||
613 | } | 655 | } |
614 | 656 | ||
615 | return ret; | 657 | return ret; |
@@ -1186,7 +1228,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1186 | * so every billion or so execbuffers, we need to stall | 1228 | * so every billion or so execbuffers, we need to stall |
1187 | * the GPU in order to reset the counters. | 1229 | * the GPU in order to reset the counters. |
1188 | */ | 1230 | */ |
1189 | ret = i915_gpu_idle(dev); | 1231 | ret = i915_gpu_idle(dev, true); |
1190 | if (ret) | 1232 | if (ret) |
1191 | goto err; | 1233 | goto err; |
1192 | 1234 | ||
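The reserve loop's pin/fence bookkeeping is now centralised in pin_and_fence_object(), and __EXEC_OBJECT_HAS_FENCE claims the top bit of entry->flags (kernel-internal, never set by userspace) to record that this pass took a fence pin. Both unwind paths then release in the same order; a condensed sketch of the pattern, using only names from the hunks above:

/* acquire (pin_and_fence_object), for a tiled object needing a fence: */
ret = i915_gem_object_get_fence(obj, ring);
if (ret == 0) {
        entry->flags |= __EXEC_OBJECT_HAS_FENCE;
        i915_gem_object_pin_fence(obj);
}

/* release (both the per-pass unpin loop and the err: unwind): */
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
        i915_gem_object_unpin_fence(obj);
        entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);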
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6042c5e6d278..11bddd5a5a6a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -55,7 +55,7 @@ static bool do_idling(struct drm_i915_private *dev_priv) | |||
55 | 55 | ||
56 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { | 56 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { |
57 | dev_priv->mm.interruptible = false; | 57 | dev_priv->mm.interruptible = false; |
58 | if (i915_gpu_idle(dev_priv->dev)) { | 58 | if (i915_gpu_idle(dev_priv->dev, false)) { |
59 | DRM_ERROR("Couldn't idle GPU\n"); | 59 | DRM_ERROR("Couldn't idle GPU\n"); |
60 | /* Wait a bit, in hopes it avoids the hang */ | 60 | /* Wait a bit, in hopes it avoids the hang */ |
61 | udelay(10); | 61 | udelay(10); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 31d334d9d9da..861223bf3944 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -107,10 +107,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
107 | */ | 107 | */ |
108 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | 108 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
109 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 109 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
110 | } else if (IS_MOBILE(dev)) { | 110 | } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { |
111 | uint32_t dcc; | 111 | uint32_t dcc; |
112 | 112 | ||
113 | /* On mobile 9xx chipsets, channel interleave by the CPU is | 113 | /* On 9xx chipsets, channel interleave by the CPU is |
114 | * determined by DCC. For single-channel, neither the CPU | 114 | * determined by DCC. For single-channel, neither the CPU |
115 | * nor the GPU do swizzling. For dual channel interleaved, | 115 | * nor the GPU do swizzling. For dual channel interleaved, |
116 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit | 116 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5bd4361ea84d..6442ff269642 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -720,7 +720,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
720 | reloc_offset = src->gtt_offset; | 720 | reloc_offset = src->gtt_offset; |
721 | for (page = 0; page < page_count; page++) { | 721 | for (page = 0; page < page_count; page++) { |
722 | unsigned long flags; | 722 | unsigned long flags; |
723 | void __iomem *s; | ||
724 | void *d; | 723 | void *d; |
725 | 724 | ||
726 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | 725 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); |
@@ -728,10 +727,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
728 | goto unwind; | 727 | goto unwind; |
729 | 728 | ||
730 | local_irq_save(flags); | 729 | local_irq_save(flags); |
731 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 730 | if (reloc_offset < dev_priv->mm.gtt_mappable_end) { |
732 | reloc_offset); | 731 | void __iomem *s; |
733 | memcpy_fromio(d, s, PAGE_SIZE); | 732 | |
734 | io_mapping_unmap_atomic(s); | 733 | /* Simply ignore tiling or any overlapping fence. |
734 | * It's part of the error state, and this hopefully | ||
735 | * captures what the GPU read. | ||
736 | */ | ||
737 | |||
738 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
739 | reloc_offset); | ||
740 | memcpy_fromio(d, s, PAGE_SIZE); | ||
741 | io_mapping_unmap_atomic(s); | ||
742 | } else { | ||
743 | void *s; | ||
744 | |||
745 | drm_clflush_pages(&src->pages[page], 1); | ||
746 | |||
747 | s = kmap_atomic(src->pages[page]); | ||
748 | memcpy(d, s, PAGE_SIZE); | ||
749 | kunmap_atomic(s); | ||
750 | |||
751 | drm_clflush_pages(&src->pages[page], 1); | ||
752 | } | ||
735 | local_irq_restore(flags); | 753 | local_irq_restore(flags); |
736 | 754 | ||
737 | dst->pages[page] = d; | 755 | dst->pages[page] = d; |
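Error capture previously assumed every active object was reachable through the GTT aperture; a page bound above gtt_mappable_end has no io-mapping to read through. The new branch copies such pages via a CPU kernel mapping instead, with clflush before and after so the snapshot reflects what the GPU saw and leaves no dirty cachelines behind. Factored out as a sketch (the patch open-codes this inside the loop; the helper is illustrative only):

static void capture_page(struct drm_i915_private *dev_priv,
                         struct drm_i915_gem_object *src,
                         int page, u32 reloc_offset, void *d)
{
        if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
                void __iomem *s;

                /* reachable through the aperture: WC read via the GTT */
                s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                             reloc_offset);
                memcpy_fromio(d, s, PAGE_SIZE);
                io_mapping_unmap_atomic(s);
        } else {
                void *s;

                /* unmappable: flush, copy through a kernel mapping, flush */
                drm_clflush_pages(&src->pages[page], 1);
                s = kmap_atomic(src->pages[page]);
                memcpy(d, s, PAGE_SIZE);
                kunmap_atomic(s);
                drm_clflush_pages(&src->pages[page], 1);
        }
}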
@@ -804,7 +822,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, | |||
804 | err->tiling = obj->tiling_mode; | 822 | err->tiling = obj->tiling_mode; |
805 | err->dirty = obj->dirty; | 823 | err->dirty = obj->dirty; |
806 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | 824 | err->purgeable = obj->madv != I915_MADV_WILLNEED; |
807 | err->ring = obj->ring ? obj->ring->id : 0; | 825 | err->ring = obj->ring ? obj->ring->id : -1; |
808 | err->cache_level = obj->cache_level; | 826 | err->cache_level = obj->cache_level; |
809 | 827 | ||
810 | if (++i == count) | 828 | if (++i == count) |
@@ -876,6 +894,39 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
876 | return NULL; | 894 | return NULL; |
877 | } | 895 | } |
878 | 896 | ||
897 | static void i915_record_ring_state(struct drm_device *dev, | ||
898 | struct drm_i915_error_state *error, | ||
899 | struct intel_ring_buffer *ring) | ||
900 | { | ||
901 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
902 | |||
903 | if (INTEL_INFO(dev)->gen >= 6) { | ||
904 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); | ||
905 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); | ||
906 | } | ||
907 | |||
908 | if (INTEL_INFO(dev)->gen >= 4) { | ||
909 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); | ||
910 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | ||
911 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | ||
912 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); | ||
913 | if (ring->id == RCS) { | ||
914 | error->instdone1 = I915_READ(INSTDONE1); | ||
915 | error->bbaddr = I915_READ64(BB_ADDR); | ||
916 | } | ||
917 | } else { | ||
918 | error->ipeir[ring->id] = I915_READ(IPEIR); | ||
919 | error->ipehr[ring->id] = I915_READ(IPEHR); | ||
920 | error->instdone[ring->id] = I915_READ(INSTDONE); | ||
921 | } | ||
922 | |||
923 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); | ||
924 | error->seqno[ring->id] = ring->get_seqno(ring); | ||
925 | error->acthd[ring->id] = intel_ring_get_active_head(ring); | ||
926 | error->head[ring->id] = I915_READ_HEAD(ring); | ||
927 | error->tail[ring->id] = I915_READ_TAIL(ring); | ||
928 | } | ||
929 | |||
879 | /** | 930 | /** |
880 | * i915_capture_error_state - capture an error record for later analysis | 931 | * i915_capture_error_state - capture an error record for later analysis |
881 | * @dev: drm device | 932 | * @dev: drm device |
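i915_record_ring_state() replaces the per-engine scalars (the bcs_*/vcs_* fields and render-only ipeir/acthd/seqno removed below) with arrays indexed by the new 0-based ring->id, so one helper serves every engine. The implied error-state layout, reconstructed from the stores above (these declarations are an inference, not quoted from i915_drv.h):

/* in struct drm_i915_error_state, one slot per engine: */
u32 faddr[I915_NUM_RINGS];      /* gen6+ */
u32 fault_reg[I915_NUM_RINGS];  /* gen6+ */
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];     /* gen4+ */
u32 instpm[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
u32 tail[I915_NUM_RINGS];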
@@ -900,7 +951,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
900 | return; | 951 | return; |
901 | 952 | ||
902 | /* Account for pipe specific data like PIPE*STAT */ | 953 | /* Account for pipe specific data like PIPE*STAT */ |
903 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 954 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
904 | if (!error) { | 955 | if (!error) { |
905 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 956 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
906 | return; | 957 | return; |
@@ -909,47 +960,22 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
909 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", | 960 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
910 | dev->primary->index); | 961 | dev->primary->index); |
911 | 962 | ||
912 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); | ||
913 | error->eir = I915_READ(EIR); | 963 | error->eir = I915_READ(EIR); |
914 | error->pgtbl_er = I915_READ(PGTBL_ER); | 964 | error->pgtbl_er = I915_READ(PGTBL_ER); |
915 | for_each_pipe(pipe) | 965 | for_each_pipe(pipe) |
916 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); | 966 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
917 | error->instpm = I915_READ(INSTPM); | 967 | |
918 | error->error = 0; | ||
919 | if (INTEL_INFO(dev)->gen >= 6) { | 968 | if (INTEL_INFO(dev)->gen >= 6) { |
920 | error->error = I915_READ(ERROR_GEN6); | 969 | error->error = I915_READ(ERROR_GEN6); |
921 | 970 | error->done_reg = I915_READ(DONE_REG); | |
922 | error->bcs_acthd = I915_READ(BCS_ACTHD); | ||
923 | error->bcs_ipehr = I915_READ(BCS_IPEHR); | ||
924 | error->bcs_ipeir = I915_READ(BCS_IPEIR); | ||
925 | error->bcs_instdone = I915_READ(BCS_INSTDONE); | ||
926 | error->bcs_seqno = 0; | ||
927 | if (dev_priv->ring[BCS].get_seqno) | ||
928 | error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); | ||
929 | |||
930 | error->vcs_acthd = I915_READ(VCS_ACTHD); | ||
931 | error->vcs_ipehr = I915_READ(VCS_IPEHR); | ||
932 | error->vcs_ipeir = I915_READ(VCS_IPEIR); | ||
933 | error->vcs_instdone = I915_READ(VCS_INSTDONE); | ||
934 | error->vcs_seqno = 0; | ||
935 | if (dev_priv->ring[VCS].get_seqno) | ||
936 | error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); | ||
937 | } | ||
938 | if (INTEL_INFO(dev)->gen >= 4) { | ||
939 | error->ipeir = I915_READ(IPEIR_I965); | ||
940 | error->ipehr = I915_READ(IPEHR_I965); | ||
941 | error->instdone = I915_READ(INSTDONE_I965); | ||
942 | error->instps = I915_READ(INSTPS); | ||
943 | error->instdone1 = I915_READ(INSTDONE1); | ||
944 | error->acthd = I915_READ(ACTHD_I965); | ||
945 | error->bbaddr = I915_READ64(BB_ADDR); | ||
946 | } else { | ||
947 | error->ipeir = I915_READ(IPEIR); | ||
948 | error->ipehr = I915_READ(IPEHR); | ||
949 | error->instdone = I915_READ(INSTDONE); | ||
950 | error->acthd = I915_READ(ACTHD); | ||
951 | error->bbaddr = 0; | ||
952 | } | 971 | } |
972 | |||
973 | i915_record_ring_state(dev, error, &dev_priv->ring[RCS]); | ||
974 | if (HAS_BLT(dev)) | ||
975 | i915_record_ring_state(dev, error, &dev_priv->ring[BCS]); | ||
976 | if (HAS_BSD(dev)) | ||
977 | i915_record_ring_state(dev, error, &dev_priv->ring[VCS]); | ||
978 | |||
953 | i915_gem_record_fences(dev, error); | 979 | i915_gem_record_fences(dev, error); |
954 | 980 | ||
955 | /* Record the active batch and ring buffers */ | 981 | /* Record the active batch and ring buffers */ |
@@ -1017,11 +1043,12 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
1017 | { | 1043 | { |
1018 | struct drm_i915_private *dev_priv = dev->dev_private; | 1044 | struct drm_i915_private *dev_priv = dev->dev_private; |
1019 | struct drm_i915_error_state *error; | 1045 | struct drm_i915_error_state *error; |
1046 | unsigned long flags; | ||
1020 | 1047 | ||
1021 | spin_lock(&dev_priv->error_lock); | 1048 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
1022 | error = dev_priv->first_error; | 1049 | error = dev_priv->first_error; |
1023 | dev_priv->first_error = NULL; | 1050 | dev_priv->first_error = NULL; |
1024 | spin_unlock(&dev_priv->error_lock); | 1051 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
1025 | 1052 | ||
1026 | if (error) | 1053 | if (error) |
1027 | i915_error_state_free(dev, error); | 1054 | i915_error_state_free(dev, error); |
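error_lock is also taken on the capture side, which runs from the error-interrupt path; a plain spin_lock() here could deadlock if that interrupt fired on the same CPU while the lock was held. Hence the switch to the irq-saving form:

unsigned long flags;

spin_lock_irqsave(&dev_priv->error_lock, flags);   /* masks local irqs */
error = dev_priv->first_error;
dev_priv->first_error = NULL;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);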
@@ -1698,6 +1725,7 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1698 | dev_priv->last_instdone1 == instdone1) { | 1725 | dev_priv->last_instdone1 == instdone1) { |
1699 | if (dev_priv->hangcheck_count++ > 1) { | 1726 | if (dev_priv->hangcheck_count++ > 1) { |
1700 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1727 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
1728 | i915_handle_error(dev, true); | ||
1701 | 1729 | ||
1702 | if (!IS_GEN2(dev)) { | 1730 | if (!IS_GEN2(dev)) { |
1703 | /* Is the chip hanging on a WAIT_FOR_EVENT? | 1731 | /* Is the chip hanging on a WAIT_FOR_EVENT? |
@@ -1705,7 +1733,6 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1705 | * and break the hang. This should work on | 1733 | * and break the hang. This should work on |
1706 | * all but the second generation chipsets. | 1734 | * all but the second generation chipsets. |
1707 | */ | 1735 | */ |
1708 | |||
1709 | if (kick_ring(&dev_priv->ring[RCS])) | 1736 | if (kick_ring(&dev_priv->ring[RCS])) |
1710 | goto repeat; | 1737 | goto repeat; |
1711 | 1738 | ||
@@ -1718,7 +1745,6 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1718 | goto repeat; | 1745 | goto repeat; |
1719 | } | 1746 | } |
1720 | 1747 | ||
1721 | i915_handle_error(dev, true); | ||
1722 | return; | 1748 | return; |
1723 | } | 1749 | } |
1724 | } else { | 1750 | } else { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c3afb783cb9d..f9607387c00c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -319,6 +319,8 @@ | |||
319 | #define RING_HWS_PGA(base) ((base)+0x80) | 319 | #define RING_HWS_PGA(base) ((base)+0x80) |
320 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | 320 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) |
321 | #define RENDER_HWS_PGA_GEN7 (0x04080) | 321 | #define RENDER_HWS_PGA_GEN7 (0x04080) |
322 | #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) | ||
323 | #define DONE_REG 0x40b0 | ||
322 | #define BSD_HWS_PGA_GEN7 (0x04180) | 324 | #define BSD_HWS_PGA_GEN7 (0x04180) |
323 | #define BLT_HWS_PGA_GEN7 (0x04280) | 325 | #define BLT_HWS_PGA_GEN7 (0x04280) |
324 | #define RING_ACTHD(base) ((base)+0x74) | 326 | #define RING_ACTHD(base) ((base)+0x74) |
@@ -352,6 +354,12 @@ | |||
352 | #define IPEIR_I965 0x02064 | 354 | #define IPEIR_I965 0x02064 |
353 | #define IPEHR_I965 0x02068 | 355 | #define IPEHR_I965 0x02068 |
354 | #define INSTDONE_I965 0x0206c | 356 | #define INSTDONE_I965 0x0206c |
357 | #define RING_IPEIR(base) ((base)+0x64) | ||
358 | #define RING_IPEHR(base) ((base)+0x68) | ||
359 | #define RING_INSTDONE(base) ((base)+0x6c) | ||
360 | #define RING_INSTPS(base) ((base)+0x70) | ||
361 | #define RING_DMA_FADD(base) ((base)+0x78) | ||
362 | #define RING_INSTPM(base) ((base)+0xc0) | ||
355 | #define INSTPS 0x02070 /* 965+ only */ | 363 | #define INSTPS 0x02070 /* 965+ only */ |
356 | #define INSTDONE1 0x0207c /* 965+ only */ | 364 | #define INSTDONE1 0x0207c /* 965+ only */ |
357 | #define ACTHD_I965 0x02074 | 365 | #define ACTHD_I965 0x02074 |
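The new RING_* accessors parameterise the error registers on each engine's mmio_base, which is what makes the absolute VCS_*/BCS_* defines removed below redundant. A sanity check against the old values, assuming the conventional bases (render 0x02000, bsd 0x12000, blt 0x22000):

/* illustrative only: */
BUILD_BUG_ON(RING_IPEIR(0x12000) != 0x12064);    /* == old VCS_IPEIR */
BUILD_BUG_ON(RING_INSTDONE(0x22000) != 0x2206c); /* == old BCS_INSTDONE */
BUILD_BUG_ON(RING_ACTHD(0x22000) != 0x22074);    /* == old BCS_ACTHD */
/* RING_FAULT_REG strides by engine id: RCS 0x4094, VCS 0x4194, BCS 0x4294 */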
@@ -365,14 +373,6 @@ | |||
365 | #define INSTDONE 0x02090 | 373 | #define INSTDONE 0x02090 |
366 | #define NOPID 0x02094 | 374 | #define NOPID 0x02094 |
367 | #define HWSTAM 0x02098 | 375 | #define HWSTAM 0x02098 |
368 | #define VCS_INSTDONE 0x1206C | ||
369 | #define VCS_IPEIR 0x12064 | ||
370 | #define VCS_IPEHR 0x12068 | ||
371 | #define VCS_ACTHD 0x12074 | ||
372 | #define BCS_INSTDONE 0x2206C | ||
373 | #define BCS_IPEIR 0x22064 | ||
374 | #define BCS_IPEHR 0x22068 | ||
375 | #define BCS_ACTHD 0x22074 | ||
376 | 376 | ||
377 | #define ERROR_GEN6 0x040a0 | 377 | #define ERROR_GEN6 0x040a0 |
378 | 378 | ||
@@ -391,7 +391,7 @@ | |||
391 | 391 | ||
392 | #define MI_MODE 0x0209c | 392 | #define MI_MODE 0x0209c |
393 | # define VS_TIMER_DISPATCH (1 << 6) | 393 | # define VS_TIMER_DISPATCH (1 << 6) |
394 | # define MI_FLUSH_ENABLE (1 << 11) | 394 | # define MI_FLUSH_ENABLE (1 << 12) |
395 | 395 | ||
396 | #define GFX_MODE 0x02520 | 396 | #define GFX_MODE 0x02520 |
397 | #define GFX_MODE_GEN7 0x0229c | 397 | #define GFX_MODE_GEN7 0x0229c |
@@ -3742,4 +3742,16 @@ | |||
3742 | */ | 3742 | */ |
3743 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) | 3743 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) |
3744 | 3744 | ||
3745 | #define IBX_AUD_CONFIG_A 0xe2000 | ||
3746 | #define CPT_AUD_CONFIG_A 0xe5000 | ||
3747 | #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) | ||
3748 | #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) | ||
3749 | #define AUD_CONFIG_UPPER_N_SHIFT 20 | ||
3750 | #define AUD_CONFIG_UPPER_N_VALUE (0xff << 20) | ||
3751 | #define AUD_CONFIG_LOWER_N_SHIFT 4 | ||
3752 | #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) | ||
3753 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 | ||
3754 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) | ||
3755 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) | ||
3756 | |||
3745 | #endif /* _I915_REG_H_ */ | 3757 | #endif /* _I915_REG_H_ */ |
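Judging from the masks, the AUD_CONFIG fields describe a 20-bit HDMI N value split across an 8-bit upper and a 12-bit lower field, enabled by AUD_CONFIG_N_PROG_ENABLE, while AUD_CONFIG_N_VALUE_INDEX flips the register into its DisplayPort interpretation (used by the ironlake_write_eld() change below). A hedged sketch of packing an explicit N; the helper is illustrative, not part of the patch:

static u32 aud_config_n(u32 n)
{
        u32 val = AUD_CONFIG_N_PROG_ENABLE;  /* override automatic N */

        val |= ((n >> 12) << AUD_CONFIG_UPPER_N_SHIFT) & AUD_CONFIG_UPPER_N_VALUE;
        val |= ((n & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT) & AUD_CONFIG_LOWER_N_VALUE;
        return val;
}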
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ba19df199e4..dfa67449827a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -936,6 +936,10 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
936 | u32 val; | 936 | u32 val; |
937 | bool cur_state; | 937 | bool cur_state; |
938 | 938 | ||
939 | /* if we need the pipe A quirk it must be always on */ | ||
940 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) | ||
941 | state = true; | ||
942 | |||
939 | reg = PIPECONF(pipe); | 943 | reg = PIPECONF(pipe); |
940 | val = I915_READ(reg); | 944 | val = I915_READ(reg); |
941 | cur_state = !!(val & PIPECONF_ENABLE); | 945 | cur_state = !!(val & PIPECONF_ENABLE); |
@@ -2037,6 +2041,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
2037 | ret = i915_gem_object_get_fence(obj, pipelined); | 2041 | ret = i915_gem_object_get_fence(obj, pipelined); |
2038 | if (ret) | 2042 | if (ret) |
2039 | goto err_unpin; | 2043 | goto err_unpin; |
2044 | |||
2045 | i915_gem_object_pin_fence(obj); | ||
2040 | } | 2046 | } |
2041 | 2047 | ||
2042 | dev_priv->mm.interruptible = true; | 2048 | dev_priv->mm.interruptible = true; |
@@ -2049,6 +2055,12 @@ err_interruptible: | |||
2049 | return ret; | 2055 | return ret; |
2050 | } | 2056 | } |
2051 | 2057 | ||
2058 | void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) | ||
2059 | { | ||
2060 | i915_gem_object_unpin_fence(obj); | ||
2061 | i915_gem_object_unpin(obj); | ||
2062 | } | ||
2063 | |||
2052 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 2064 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2053 | int x, int y) | 2065 | int x, int y) |
2054 | { | 2066 | { |
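intel_unpin_fb_obj() is the counterpart to intel_pin_and_fence_fb_obj(): since scanout now takes an explicit pin on its fence register (the i915_gem_object_pin_fence() call added above), every former direct i915_gem_object_unpin() on a framebuffer in the hunks below must drop the fence pin first. Release mirrors acquisition:

void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_fence(obj);  /* fence pin first... */
        i915_gem_object_unpin(obj);        /* ...then the bind pin */
}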
@@ -2280,7 +2292,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2280 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 2292 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
2281 | LEAVE_ATOMIC_MODE_SET); | 2293 | LEAVE_ATOMIC_MODE_SET); |
2282 | if (ret) { | 2294 | if (ret) { |
2283 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 2295 | intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); |
2284 | mutex_unlock(&dev->struct_mutex); | 2296 | mutex_unlock(&dev->struct_mutex); |
2285 | DRM_ERROR("failed to update base address\n"); | 2297 | DRM_ERROR("failed to update base address\n"); |
2286 | return ret; | 2298 | return ret; |
@@ -2288,7 +2300,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2288 | 2300 | ||
2289 | if (old_fb) { | 2301 | if (old_fb) { |
2290 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2302 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2291 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); | 2303 | intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); |
2292 | } | 2304 | } |
2293 | 2305 | ||
2294 | mutex_unlock(&dev->struct_mutex); | 2306 | mutex_unlock(&dev->struct_mutex); |
@@ -3351,7 +3363,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
3351 | 3363 | ||
3352 | if (crtc->fb) { | 3364 | if (crtc->fb) { |
3353 | mutex_lock(&dev->struct_mutex); | 3365 | mutex_lock(&dev->struct_mutex); |
3354 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 3366 | intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); |
3355 | mutex_unlock(&dev->struct_mutex); | 3367 | mutex_unlock(&dev->struct_mutex); |
3356 | } | 3368 | } |
3357 | } | 3369 | } |
@@ -4548,6 +4560,7 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4548 | { | 4560 | { |
4549 | struct drm_i915_private *dev_priv = dev->dev_private; | 4561 | struct drm_i915_private *dev_priv = dev->dev_private; |
4550 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | 4562 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4563 | u32 val; | ||
4551 | int fbc_wm, plane_wm, cursor_wm; | 4564 | int fbc_wm, plane_wm, cursor_wm; |
4552 | unsigned int enabled; | 4565 | unsigned int enabled; |
4553 | 4566 | ||
@@ -4556,8 +4569,10 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4556 | &sandybridge_display_wm_info, latency, | 4569 | &sandybridge_display_wm_info, latency, |
4557 | &sandybridge_cursor_wm_info, latency, | 4570 | &sandybridge_cursor_wm_info, latency, |
4558 | &plane_wm, &cursor_wm)) { | 4571 | &plane_wm, &cursor_wm)) { |
4559 | I915_WRITE(WM0_PIPEA_ILK, | 4572 | val = I915_READ(WM0_PIPEA_ILK); |
4560 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 4573 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
4574 | I915_WRITE(WM0_PIPEA_ILK, val | | ||
4575 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
4561 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 4576 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
4562 | " plane %d, " "cursor: %d\n", | 4577 | " plane %d, " "cursor: %d\n", |
4563 | plane_wm, cursor_wm); | 4578 | plane_wm, cursor_wm); |
@@ -4568,8 +4583,10 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4568 | &sandybridge_display_wm_info, latency, | 4583 | &sandybridge_display_wm_info, latency, |
4569 | &sandybridge_cursor_wm_info, latency, | 4584 | &sandybridge_cursor_wm_info, latency, |
4570 | &plane_wm, &cursor_wm)) { | 4585 | &plane_wm, &cursor_wm)) { |
4571 | I915_WRITE(WM0_PIPEB_ILK, | 4586 | val = I915_READ(WM0_PIPEB_ILK); |
4572 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 4587 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
4588 | I915_WRITE(WM0_PIPEB_ILK, val | | ||
4589 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
4573 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 4590 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
4574 | " plane %d, cursor: %d\n", | 4591 | " plane %d, cursor: %d\n", |
4575 | plane_wm, cursor_wm); | 4592 | plane_wm, cursor_wm); |
@@ -4582,8 +4599,10 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4582 | &sandybridge_display_wm_info, latency, | 4599 | &sandybridge_display_wm_info, latency, |
4583 | &sandybridge_cursor_wm_info, latency, | 4600 | &sandybridge_cursor_wm_info, latency, |
4584 | &plane_wm, &cursor_wm)) { | 4601 | &plane_wm, &cursor_wm)) { |
4585 | I915_WRITE(WM0_PIPEC_IVB, | 4602 | val = I915_READ(WM0_PIPEC_IVB); |
4586 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 4603 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
4604 | I915_WRITE(WM0_PIPEC_IVB, val | | ||
4605 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
4587 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" | 4606 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" |
4588 | " plane %d, cursor: %d\n", | 4607 | " plane %d, cursor: %d\n", |
4589 | plane_wm, cursor_wm); | 4608 | plane_wm, cursor_wm); |
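All three WM0 writes become read-modify-write because the register now also carries the sprite watermark (programmed separately in sandybridge_update_sprite_wm() below); writing plane|cursor outright would zero it. The pattern, clearing only the plane and cursor fields:

val = I915_READ(WM0_PIPEA_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);  /* keep sprite bits */
I915_WRITE(WM0_PIPEA_ILK,
           val | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);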
@@ -4727,6 +4746,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, | |||
4727 | { | 4746 | { |
4728 | struct drm_i915_private *dev_priv = dev->dev_private; | 4747 | struct drm_i915_private *dev_priv = dev->dev_private; |
4729 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | 4748 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4749 | u32 val; | ||
4730 | int sprite_wm, reg; | 4750 | int sprite_wm, reg; |
4731 | int ret; | 4751 | int ret; |
4732 | 4752 | ||
@@ -4753,7 +4773,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, | |||
4753 | return; | 4773 | return; |
4754 | } | 4774 | } |
4755 | 4775 | ||
4756 | I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); | 4776 | val = I915_READ(reg); |
4777 | val &= ~WM0_PIPE_SPRITE_MASK; | ||
4778 | I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); | ||
4757 | DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); | 4779 | DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); |
4758 | 4780 | ||
4759 | 4781 | ||
@@ -6130,15 +6152,18 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6130 | uint32_t i; | 6152 | uint32_t i; |
6131 | int len; | 6153 | int len; |
6132 | int hdmiw_hdmiedid; | 6154 | int hdmiw_hdmiedid; |
6155 | int aud_config; | ||
6133 | int aud_cntl_st; | 6156 | int aud_cntl_st; |
6134 | int aud_cntrl_st2; | 6157 | int aud_cntrl_st2; |
6135 | 6158 | ||
6136 | if (HAS_PCH_IBX(connector->dev)) { | 6159 | if (HAS_PCH_IBX(connector->dev)) { |
6137 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; | 6160 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; |
6161 | aud_config = IBX_AUD_CONFIG_A; | ||
6138 | aud_cntl_st = IBX_AUD_CNTL_ST_A; | 6162 | aud_cntl_st = IBX_AUD_CNTL_ST_A; |
6139 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; | 6163 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
6140 | } else { | 6164 | } else { |
6141 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; | 6165 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; |
6166 | aud_config = CPT_AUD_CONFIG_A; | ||
6142 | aud_cntl_st = CPT_AUD_CNTL_ST_A; | 6167 | aud_cntl_st = CPT_AUD_CNTL_ST_A; |
6143 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; | 6168 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; |
6144 | } | 6169 | } |
@@ -6146,6 +6171,7 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6146 | i = to_intel_crtc(crtc)->pipe; | 6171 | i = to_intel_crtc(crtc)->pipe; |
6147 | hdmiw_hdmiedid += i * 0x100; | 6172 | hdmiw_hdmiedid += i * 0x100; |
6148 | aud_cntl_st += i * 0x100; | 6173 | aud_cntl_st += i * 0x100; |
6174 | aud_config += i * 0x100; | ||
6149 | 6175 | ||
6150 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); | 6176 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); |
6151 | 6177 | ||
@@ -6165,7 +6191,9 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6165 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 6191 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
6166 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | 6192 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6167 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | 6193 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
6168 | } | 6194 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
6195 | } else | ||
6196 | I915_WRITE(aud_config, 0); | ||
6169 | 6197 | ||
6170 | if (intel_eld_uptodate(connector, | 6198 | if (intel_eld_uptodate(connector, |
6171 | aud_cntrl_st2, eldv, | 6199 | aud_cntrl_st2, eldv, |
@@ -7141,7 +7169,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
7141 | container_of(__work, struct intel_unpin_work, work); | 7169 | container_of(__work, struct intel_unpin_work, work); |
7142 | 7170 | ||
7143 | mutex_lock(&work->dev->struct_mutex); | 7171 | mutex_lock(&work->dev->struct_mutex); |
7144 | i915_gem_object_unpin(work->old_fb_obj); | 7172 | intel_unpin_fb_obj(work->old_fb_obj); |
7145 | drm_gem_object_unreference(&work->pending_flip_obj->base); | 7173 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
7146 | drm_gem_object_unreference(&work->old_fb_obj->base); | 7174 | drm_gem_object_unreference(&work->old_fb_obj->base); |
7147 | 7175 | ||
@@ -7291,7 +7319,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | |||
7291 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 7319 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
7292 | OUT_RING(fb->pitches[0]); | 7320 | OUT_RING(fb->pitches[0]); |
7293 | OUT_RING(obj->gtt_offset + offset); | 7321 | OUT_RING(obj->gtt_offset + offset); |
7294 | OUT_RING(MI_NOOP); | 7322 | OUT_RING(0); /* aux display base address, unused */ |
7295 | ADVANCE_LP_RING(); | 7323 | ADVANCE_LP_RING(); |
7296 | out: | 7324 | out: |
7297 | return ret; | 7325 | return ret; |
@@ -7883,7 +7911,8 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
7883 | case DRM_FORMAT_VYUY: | 7911 | case DRM_FORMAT_VYUY: |
7884 | break; | 7912 | break; |
7885 | default: | 7913 | default: |
7886 | DRM_ERROR("unsupported pixel format\n"); | 7914 | DRM_DEBUG_KMS("unsupported pixel format %u\n", |
7915 | mode_cmd->pixel_format); | ||
7887 | return -EINVAL; | 7916 | return -EINVAL; |
7888 | } | 7917 | } |
7889 | 7918 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1348705faf6b..9cec6c3937fa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -374,6 +374,7 @@ extern void intel_init_emon(struct drm_device *dev); | |||
374 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 374 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
375 | struct drm_i915_gem_object *obj, | 375 | struct drm_i915_gem_object *obj, |
376 | struct intel_ring_buffer *pipelined); | 376 | struct intel_ring_buffer *pipelined); |
377 | extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); | ||
377 | 378 | ||
378 | extern int intel_framebuffer_init(struct drm_device *dev, | 379 | extern int intel_framebuffer_init(struct drm_device *dev, |
379 | struct intel_framebuffer *ifb, | 380 | struct intel_framebuffer *ifb, |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index cdf17d4cc1f7..23a543cdfa99 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -227,7 +227,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
227 | } | 227 | } |
228 | overlay->last_flip_req = request->seqno; | 228 | overlay->last_flip_req = request->seqno; |
229 | overlay->flip_tail = tail; | 229 | overlay->flip_tail = tail; |
230 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); | 230 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, |
231 | true); | ||
231 | if (ret) | 232 | if (ret) |
232 | return ret; | 233 | return ret; |
233 | 234 | ||
@@ -448,7 +449,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) | |||
448 | if (overlay->last_flip_req == 0) | 449 | if (overlay->last_flip_req == 0) |
449 | return 0; | 450 | return 0; |
450 | 451 | ||
451 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); | 452 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, |
453 | true); | ||
452 | if (ret) | 454 | if (ret) |
453 | return ret; | 455 | return ret; |
454 | 456 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1ab842c6032e..4956f1bff522 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -399,8 +399,6 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
399 | 399 | ||
400 | if (INTEL_INFO(dev)->gen > 3) { | 400 | if (INTEL_INFO(dev)->gen > 3) { |
401 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; | 401 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; |
402 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
403 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | ||
404 | I915_WRITE(MI_MODE, mode); | 402 | I915_WRITE(MI_MODE, mode); |
405 | if (IS_GEN7(dev)) | 403 | if (IS_GEN7(dev)) |
406 | I915_WRITE(GFX_MODE_GEN7, | 404 | I915_WRITE(GFX_MODE_GEN7, |
@@ -744,13 +742,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
744 | */ | 742 | */ |
745 | if (IS_GEN7(dev)) { | 743 | if (IS_GEN7(dev)) { |
746 | switch (ring->id) { | 744 | switch (ring->id) { |
747 | case RING_RENDER: | 745 | case RCS: |
748 | mmio = RENDER_HWS_PGA_GEN7; | 746 | mmio = RENDER_HWS_PGA_GEN7; |
749 | break; | 747 | break; |
750 | case RING_BLT: | 748 | case BCS: |
751 | mmio = BLT_HWS_PGA_GEN7; | 749 | mmio = BLT_HWS_PGA_GEN7; |
752 | break; | 750 | break; |
753 | case RING_BSD: | 751 | case VCS: |
754 | mmio = BSD_HWS_PGA_GEN7; | 752 | mmio = BSD_HWS_PGA_GEN7; |
755 | break; | 753 | break; |
756 | } | 754 | } |
@@ -1212,7 +1210,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring) | |||
1212 | 1210 | ||
1213 | static const struct intel_ring_buffer render_ring = { | 1211 | static const struct intel_ring_buffer render_ring = { |
1214 | .name = "render ring", | 1212 | .name = "render ring", |
1215 | .id = RING_RENDER, | 1213 | .id = RCS, |
1216 | .mmio_base = RENDER_RING_BASE, | 1214 | .mmio_base = RENDER_RING_BASE, |
1217 | .size = 32 * PAGE_SIZE, | 1215 | .size = 32 * PAGE_SIZE, |
1218 | .init = init_render_ring, | 1216 | .init = init_render_ring, |
@@ -1235,7 +1233,7 @@ static const struct intel_ring_buffer render_ring = { | |||
1235 | 1233 | ||
1236 | static const struct intel_ring_buffer bsd_ring = { | 1234 | static const struct intel_ring_buffer bsd_ring = { |
1237 | .name = "bsd ring", | 1235 | .name = "bsd ring", |
1238 | .id = RING_BSD, | 1236 | .id = VCS, |
1239 | .mmio_base = BSD_RING_BASE, | 1237 | .mmio_base = BSD_RING_BASE, |
1240 | .size = 32 * PAGE_SIZE, | 1238 | .size = 32 * PAGE_SIZE, |
1241 | .init = init_ring_common, | 1239 | .init = init_ring_common, |
@@ -1345,7 +1343,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) | |||
1345 | /* ring buffer for Video Codec for Gen6+ */ | 1343 | /* ring buffer for Video Codec for Gen6+ */ |
1346 | static const struct intel_ring_buffer gen6_bsd_ring = { | 1344 | static const struct intel_ring_buffer gen6_bsd_ring = { |
1347 | .name = "gen6 bsd ring", | 1345 | .name = "gen6 bsd ring", |
1348 | .id = RING_BSD, | 1346 | .id = VCS, |
1349 | .mmio_base = GEN6_BSD_RING_BASE, | 1347 | .mmio_base = GEN6_BSD_RING_BASE, |
1350 | .size = 32 * PAGE_SIZE, | 1348 | .size = 32 * PAGE_SIZE, |
1351 | .init = init_ring_common, | 1349 | .init = init_ring_common, |
@@ -1381,79 +1379,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring) | |||
1381 | GEN6_BLITTER_USER_INTERRUPT); | 1379 | GEN6_BLITTER_USER_INTERRUPT); |
1382 | } | 1380 | } |
1383 | 1381 | ||
1384 | |||
1385 | /* Workaround for some stepping of SNB, | ||
1386 | * each time when BLT engine ring tail moved, | ||
1387 | * the first command in the ring to be parsed | ||
1388 | * should be MI_BATCH_BUFFER_START | ||
1389 | */ | ||
1390 | #define NEED_BLT_WORKAROUND(dev) \ | ||
1391 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) | ||
1392 | |||
1393 | static inline struct drm_i915_gem_object * | ||
1394 | to_blt_workaround(struct intel_ring_buffer *ring) | ||
1395 | { | ||
1396 | return ring->private; | ||
1397 | } | ||
1398 | |||
1399 | static int blt_ring_init(struct intel_ring_buffer *ring) | ||
1400 | { | ||
1401 | if (NEED_BLT_WORKAROUND(ring->dev)) { | ||
1402 | struct drm_i915_gem_object *obj; | ||
1403 | u32 *ptr; | ||
1404 | int ret; | ||
1405 | |||
1406 | obj = i915_gem_alloc_object(ring->dev, 4096); | ||
1407 | if (obj == NULL) | ||
1408 | return -ENOMEM; | ||
1409 | |||
1410 | ret = i915_gem_object_pin(obj, 4096, true); | ||
1411 | if (ret) { | ||
1412 | drm_gem_object_unreference(&obj->base); | ||
1413 | return ret; | ||
1414 | } | ||
1415 | |||
1416 | ptr = kmap(obj->pages[0]); | ||
1417 | *ptr++ = MI_BATCH_BUFFER_END; | ||
1418 | *ptr++ = MI_NOOP; | ||
1419 | kunmap(obj->pages[0]); | ||
1420 | |||
1421 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | ||
1422 | if (ret) { | ||
1423 | i915_gem_object_unpin(obj); | ||
1424 | drm_gem_object_unreference(&obj->base); | ||
1425 | return ret; | ||
1426 | } | ||
1427 | |||
1428 | ring->private = obj; | ||
1429 | } | ||
1430 | |||
1431 | return init_ring_common(ring); | ||
1432 | } | ||
1433 | |||
1434 | static int blt_ring_begin(struct intel_ring_buffer *ring, | ||
1435 | int num_dwords) | ||
1436 | { | ||
1437 | if (ring->private) { | ||
1438 | int ret = intel_ring_begin(ring, num_dwords+2); | ||
1439 | if (ret) | ||
1440 | return ret; | ||
1441 | |||
1442 | intel_ring_emit(ring, MI_BATCH_BUFFER_START); | ||
1443 | intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); | ||
1444 | |||
1445 | return 0; | ||
1446 | } else | ||
1447 | return intel_ring_begin(ring, 4); | ||
1448 | } | ||
1449 | |||
1450 | static int blt_ring_flush(struct intel_ring_buffer *ring, | 1382 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1451 | u32 invalidate, u32 flush) | 1383 | u32 invalidate, u32 flush) |
1452 | { | 1384 | { |
1453 | uint32_t cmd; | 1385 | uint32_t cmd; |
1454 | int ret; | 1386 | int ret; |
1455 | 1387 | ||
1456 | ret = blt_ring_begin(ring, 4); | 1388 | ret = intel_ring_begin(ring, 4); |
1457 | if (ret) | 1389 | if (ret) |
1458 | return ret; | 1390 | return ret; |
1459 | 1391 | ||
@@ -1468,22 +1400,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, | |||
1468 | return 0; | 1400 | return 0; |
1469 | } | 1401 | } |
1470 | 1402 | ||
1471 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | ||
1472 | { | ||
1473 | if (!ring->private) | ||
1474 | return; | ||
1475 | |||
1476 | i915_gem_object_unpin(ring->private); | ||
1477 | drm_gem_object_unreference(ring->private); | ||
1478 | ring->private = NULL; | ||
1479 | } | ||
1480 | |||
1481 | static const struct intel_ring_buffer gen6_blt_ring = { | 1403 | static const struct intel_ring_buffer gen6_blt_ring = { |
1482 | .name = "blt ring", | 1404 | .name = "blt ring", |
1483 | .id = RING_BLT, | 1405 | .id = BCS, |
1484 | .mmio_base = BLT_RING_BASE, | 1406 | .mmio_base = BLT_RING_BASE, |
1485 | .size = 32 * PAGE_SIZE, | 1407 | .size = 32 * PAGE_SIZE, |
1486 | .init = blt_ring_init, | 1408 | .init = init_ring_common, |
1487 | .write_tail = ring_write_tail, | 1409 | .write_tail = ring_write_tail, |
1488 | .flush = blt_ring_flush, | 1410 | .flush = blt_ring_flush, |
1489 | .add_request = gen6_add_request, | 1411 | .add_request = gen6_add_request, |
@@ -1491,7 +1413,6 @@ static const struct intel_ring_buffer gen6_blt_ring = { | |||
1491 | .irq_get = blt_ring_get_irq, | 1413 | .irq_get = blt_ring_get_irq, |
1492 | .irq_put = blt_ring_put_irq, | 1414 | .irq_put = blt_ring_put_irq, |
1493 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, | 1415 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1494 | .cleanup = blt_ring_cleanup, | ||
1495 | .sync_to = gen6_blt_ring_sync_to, | 1416 | .sync_to = gen6_blt_ring_sync_to, |
1496 | .semaphore_register = {MI_SEMAPHORE_SYNC_BR, | 1417 | .semaphore_register = {MI_SEMAPHORE_SYNC_BR, |
1497 | MI_SEMAPHORE_SYNC_BV, | 1418 | MI_SEMAPHORE_SYNC_BV, |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 68281c96c558..c8b9cc0cd0dc 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -1,13 +1,6 @@ | |||
1 | #ifndef _INTEL_RINGBUFFER_H_ | 1 | #ifndef _INTEL_RINGBUFFER_H_ |
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ |
3 | 3 | ||
4 | enum { | ||
5 | RCS = 0x0, | ||
6 | VCS, | ||
7 | BCS, | ||
8 | I915_NUM_RINGS, | ||
9 | }; | ||
10 | |||
11 | struct intel_hw_status_page { | 4 | struct intel_hw_status_page { |
12 | u32 __iomem *page_addr; | 5 | u32 __iomem *page_addr; |
13 | unsigned int gfx_addr; | 6 | unsigned int gfx_addr; |
@@ -36,10 +29,11 @@ struct intel_hw_status_page { | |||
36 | struct intel_ring_buffer { | 29 | struct intel_ring_buffer { |
37 | const char *name; | 30 | const char *name; |
38 | enum intel_ring_id { | 31 | enum intel_ring_id { |
39 | RING_RENDER = 0x1, | 32 | RCS = 0x0, |
40 | RING_BSD = 0x2, | 33 | VCS, |
41 | RING_BLT = 0x4, | 34 | BCS, |
42 | } id; | 35 | } id; |
36 | #define I915_NUM_RINGS 3 | ||
43 | u32 mmio_base; | 37 | u32 mmio_base; |
44 | void __iomem *virtual_start; | 38 | void __iomem *virtual_start; |
45 | struct drm_device *dev; | 39 | struct drm_device *dev; |
@@ -119,6 +113,12 @@ struct intel_ring_buffer { | |||
119 | void *private; | 113 | void *private; |
120 | }; | 114 | }; |
121 | 115 | ||
116 | static inline unsigned | ||
117 | intel_ring_flag(struct intel_ring_buffer *ring) | ||
118 | { | ||
119 | return 1 << ring->id; | ||
120 | } | ||
121 | |||
122 | static inline u32 | 122 | static inline u32 |
123 | intel_ring_sync_index(struct intel_ring_buffer *ring, | 123 | intel_ring_sync_index(struct intel_ring_buffer *ring, |
124 | struct intel_ring_buffer *other) | 124 | struct intel_ring_buffer *other) |
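The ring ids move from single-bit masks (RING_RENDER=0x1, RING_BSD=0x2, RING_BLT=0x4) to dense 0-based indices, so ring->id can address per-ring arrays directly (error-state slots, RING_FAULT_REG, semaphore tables); callers that still want a mask, like the flush_rings accounting in execbuffer, derive it through intel_ring_flag(). Side by side:

/* before: id doubled as a mask */
cd->flush_rings |= ring->id;               /* RING_BSD == 0x2 */

/* after: id is an index, the mask is derived on demand */
cd->flush_rings |= intel_ring_flag(ring);  /* 1 << VCS == 1 << 1 == 0x2 */

/* and indices address per-ring state directly */
error->seqno[ring->id] = ring->get_seqno(ring);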
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 2288abf88cce..98444ab68bc3 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -501,7 +501,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
501 | intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); | 501 | intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); |
502 | mutex_lock(&dev->struct_mutex); | 502 | mutex_lock(&dev->struct_mutex); |
503 | } | 503 | } |
504 | i915_gem_object_unpin(old_obj); | 504 | intel_unpin_fb_obj(old_obj); |
505 | } | 505 | } |
506 | 506 | ||
507 | out_unlock: | 507 | out_unlock: |
@@ -528,7 +528,7 @@ intel_disable_plane(struct drm_plane *plane) | |||
528 | goto out; | 528 | goto out; |
529 | 529 | ||
530 | mutex_lock(&dev->struct_mutex); | 530 | mutex_lock(&dev->struct_mutex); |
531 | i915_gem_object_unpin(intel_plane->obj); | 531 | intel_unpin_fb_obj(intel_plane->obj); |
532 | intel_plane->obj = NULL; | 532 | intel_plane->obj = NULL; |
533 | mutex_unlock(&dev->struct_mutex); | 533 | mutex_unlock(&dev->struct_mutex); |
534 | out: | 534 | out: |