Diffstat (limited to 'include')
-rw-r--r--   include/drm/drm.h        |  63
-rw-r--r--   include/drm/drmP.h       | 249
-rw-r--r--   include/drm/drm_pciids.h |  54
-rw-r--r--   include/drm/i915_drm.h   | 333
4 files changed, 646 insertions, 53 deletions
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 38d3c6b8276a..f46ba4b57da4 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,7 +36,6 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#if defined(__linux__)
 #if defined(__KERNEL__)
 #endif
 #include <asm/ioctl.h>		/* For _IO* macros */
@@ -46,22 +45,6 @@
 #define DRM_IOC_WRITE		_IOC_WRITE
 #define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
-#if defined(__FreeBSD__) && defined(IN_MODULE)
-/* Prevent name collision when including sys/ioccom.h */
-#undef ioctl
-#include <sys/ioccom.h>
-#define ioctl(a,b,c)		xf86ioctl(a,b,c)
-#else
-#include <sys/ioccom.h>
-#endif				/* __FreeBSD__ && xf86ioctl */
-#define DRM_IOCTL_NR(n)		((n) & 0xff)
-#define DRM_IOC_VOID		IOC_VOID
-#define DRM_IOC_READ		IOC_OUT
-#define DRM_IOC_WRITE		IOC_IN
-#define DRM_IOC_READWRITE	IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#endif
 
 #define DRM_MAJOR	226
 #define DRM_MAX_MINOR	15
@@ -471,6 +454,7 @@ struct drm_irq_busid {
 enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
+	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
@@ -503,6 +487,19 @@ union drm_wait_vblank {
 	struct drm_wait_vblank_reply reply;
 };
 
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+	uint32_t crtc;
+	uint32_t cmd;
+};
+
 /**
  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
  *
@@ -573,6 +570,34 @@ struct drm_set_version {
 	int drm_dd_minor;
 };
 
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+	/** Handle of the object to be closed. */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+	/** Handle for the object being named */
+	uint32_t handle;
+
+	/** Returned global name */
+	uint32_t name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+	/** Name of object being opened */
+	uint32_t name;
+
+	/** Returned handle for the object */
+	uint32_t handle;
+
+	/** Returned size of the object */
+	uint64_t size;
+};
+
 #define DRM_IOCTL_BASE			'd'
 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
 #define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
@@ -587,6 +612,10 @@ struct drm_set_version {
 #define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
 #define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL		DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
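For illustration, a minimal userspace sketch of how the new generic GEM ioctls above fit together: FLINK exports a per-fd handle as a global name, OPEN imports that name elsewhere, and CLOSE drops a handle. The device fd, the pre-existing handle (object creation stays driver-specific; see the i915 ioctls further down), and the libdrm-style include path are assumptions, and error handling is trimmed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static uint32_t gem_name_from_handle(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };

	/* FLINK exports the per-fd handle as a global name (nonzero on success). */
	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
		return 0;
	return flink.name;
}

static uint32_t gem_handle_from_name(int fd, uint32_t name, uint64_t *size)
{
	struct drm_gem_open open_arg = { .name = name };

	/* OPEN turns a global name back into a local handle and reports the size. */
	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) != 0)
		return 0;
	if (size)
		*size = open_arg.size;
	return open_arg.handle;
}

static void gem_close_handle(int fd, uint32_t handle)
{
	struct drm_gem_close close_arg = { .handle = handle };

	/* CLOSE drops this handle; the object lives on while other references exist. */
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}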
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1c1b13e29223..59c796b46ee7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,7 @@ struct drm_device;
 #define DRIVER_DMA_QUEUE   0x200
 #define DRIVER_FB_DMA      0x400
 #define DRIVER_IRQ_VBL2    0x800
+#define DRIVER_GEM         0x1000
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -387,6 +388,10 @@ struct drm_file {
 	struct drm_minor *minor;
 	int remove_auth_on_close;
 	unsigned long lock_count;
+	/** Mapping of mm object handles to object pointers. */
+	struct idr object_idr;
+	/** Lock for synchronization of access to object_idr. */
+	spinlock_t table_lock;
 	struct file *filp;
 	void *driver_priv;
 };
@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
 };
 
 /**
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+	/** Reference count of this object */
+	struct kref refcount;
+
+	/** Handle count of this object. Each handle also holds a reference */
+	struct kref handlecount;
+
+	/** Related drm device */
+	struct drm_device *dev;
+
+	/** File representing the shmem storage */
+	struct file *filp;
+
+	/**
+	 * Size of the object, in bytes.  Immutable over the object's
+	 * lifetime.
+	 */
+	size_t size;
+
+	/**
+	 * Global name for this object, starts at 1. 0 means unnamed.
+	 * Access is covered by the object_name_lock in the related drm_device
+	 */
+	int name;
+
+	/**
+	 * Memory domains. These monitor which caches contain read/write data
+	 * related to the object. When transitioning from one set of domains
+	 * to another, the driver is called to ensure that caches are suitably
+	 * flushed and invalidated
+	 */
+	uint32_t read_domains;
+	uint32_t write_domain;
+
+	/**
+	 * While validating an exec operation, the
+	 * new read/write domain values are computed here.
+	 * They will be transferred to the above values
+	 * at the point that any cache flushing occurs
+	 */
+	uint32_t pending_read_domains;
+	uint32_t pending_write_domain;
+
+	void *driver_private;
+};
+
+/**
  * DRM driver structure. This structure represent the common code for
  * a family of cards. There will one drm_device for each card present
  * in this family
@@ -580,11 +635,54 @@ struct drm_driver {
 	int (*kernel_context_switch) (struct drm_device *dev, int old,
 				      int new);
 	void (*kernel_context_switch_unlock) (struct drm_device *dev);
-	int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
-	int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
 	int (*dri_library_name) (struct drm_device *dev, char *buf);
 
 	/**
+	 * get_vblank_counter - get raw hardware vblank counter
+	 * @dev: DRM device
+	 * @crtc: counter to fetch
+	 *
+	 * Driver callback for fetching a raw hardware vblank counter
+	 * for @crtc.  If a device doesn't have a hardware counter, the
+	 * driver can simply return the value of drm_vblank_count and
+	 * make the enable_vblank() and disable_vblank() hooks into no-ops,
+	 * leaving interrupts enabled at all times.
+	 *
+	 * Wraparound handling and loss of events due to modesetting is dealt
+	 * with in the DRM core code.
+	 *
+	 * RETURNS
+	 * Raw vblank counter value.
+	 */
+	u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+	/**
+	 * enable_vblank - enable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Enable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 *
+	 * RETURNS
+	 * Zero on success, appropriate errno if the given @crtc's vblank
+	 * interrupt cannot be enabled.
+	 */
+	int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
+	 * disable_vblank - disable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Disable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 */
+	void (*disable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
 	 * Called by \c drm_device_is_agp. Typically used to determine if a
 	 * card is really attached to AGP or not.
 	 *
@@ -601,7 +699,7 @@ struct drm_driver {
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
 	void (*irq_preinstall) (struct drm_device *dev);
-	void (*irq_postinstall) (struct drm_device *dev);
+	int (*irq_postinstall) (struct drm_device *dev);
 	void (*irq_uninstall) (struct drm_device *dev);
 	void (*reclaim_buffers) (struct drm_device *dev,
 				 struct drm_file * file_priv);
@@ -614,6 +712,18 @@ struct drm_driver {
 	void (*set_version) (struct drm_device *dev,
 			     struct drm_set_version *sv);
 
+	int (*proc_init)(struct drm_minor *minor);
+	void (*proc_cleanup)(struct drm_minor *minor);
+
+	/**
+	 * Driver-specific constructor for drm_gem_objects, to set up
+	 * obj->driver_private.
+	 *
+	 * Returns 0 on success.
+	 */
+	int (*gem_init_object) (struct drm_gem_object *obj);
+	void (*gem_free_object) (struct drm_gem_object *obj);
+
 	int major;
 	int minor;
 	int patchlevel;
@@ -714,7 +824,6 @@ struct drm_device {
 
 	/** \name Context support */
 	/*@{ */
-	int irq;			/**< Interrupt used by board */
 	int irq_enabled;		/**< True if irq handler is enabled */
 	__volatile__ long context_flag;	/**< Context swapping flag */
 	__volatile__ long interrupt_flag; /**< Interruption handler flag */
@@ -730,13 +839,28 @@ struct drm_device {
 	/** \name VBLANK IRQ support */
 	/*@{ */
 
-	wait_queue_head_t vbl_queue;	/**< VBLANK wait queue */
-	atomic_t vbl_received;
-	atomic_t vbl_received2;		/**< number of secondary VBLANK interrupts */
+	/*
+	 * At load time, disabling the vblank interrupt won't be allowed since
+	 * old clients may not call the modeset ioctl and therefore misbehave.
+	 * Once the modeset ioctl *has* been called though, we can safely
+	 * disable them when unused.
+	 */
+	int vblank_disable_allowed;
+
+	wait_queue_head_t *vbl_queue;	/**< VBLANK wait queue */
+	atomic_t *_vblank_count;	/**< number of VBLANK interrupts (driver must alloc the right number of counters) */
 	spinlock_t vbl_lock;
-	struct list_head vbl_sigs;	/**< signal list to send on VBLANK */
-	struct list_head vbl_sigs2;	/**< signals to send on secondary VBLANK */
-	unsigned int vbl_pending;
+	struct list_head *vbl_sigs;	/**< signal list to send on VBLANK */
+	atomic_t vbl_signal_pending;	/* number of signals pending on all crtcs*/
+	atomic_t *vblank_refcount;	/* number of users of vblank interruptsper crtc */
+	u32 *last_vblank;		/* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	int *vblank_enabled;		/* so we don't call enable more than
+					   once per disable */
+	int *vblank_inmodeset;		/* Display driver is setting mode */
+	struct timer_list vblank_disable_timer;
+
+	u32 max_vblank_count;		/**< size of vblank counter register */
 	spinlock_t tasklet_lock;	/**< For drm_locked_tasklet */
 	void (*locked_tasklet_func)(struct drm_device *dev);
 
@@ -757,6 +881,7 @@ struct drm_device {
 	struct pci_controller *hose;
 #endif
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
+	int num_crtcs;		/**< Number of CRTCs on this device */
 	void *dev_private;	/**< device private data */
 	struct drm_sigdata sigdata;	/**< For block_all_signals */
 	sigset_t sigmask;
@@ -771,8 +896,29 @@ struct drm_device {
 	spinlock_t drw_lock;
 	struct idr drw_idr;
 	/*@} */
+
+	/** \name GEM information */
+	/*@{ */
+	spinlock_t object_name_lock;
+	struct idr object_name_idr;
+	atomic_t object_count;
+	atomic_t object_memory;
+	atomic_t pin_count;
+	atomic_t pin_memory;
+	atomic_t gtt_count;
+	atomic_t gtt_memory;
+	uint32_t gtt_total;
+	uint32_t invalidate_domains;	/* domains pending invalidation */
+	uint32_t flush_domains;		/* domains pending flush */
+	/*@} */
+
 };
 
+static inline int drm_dev_to_irq(struct drm_device *dev)
+{
+	return dev->pdev->irq;
+}
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
 {
@@ -867,6 +1013,11 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+				       struct page **pages,
+				       unsigned long num_pages,
+				       uint32_t gtt_offset,
+				       uint32_t type);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
 /* Misc. IOCTL support (drm_ioctl.h) */
@@ -929,6 +1080,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
 extern int drm_authmagic(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+
 /* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv);
@@ -985,15 +1139,25 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
 extern int drm_control(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_install(struct drm_device *dev);
 extern int drm_irq_uninstall(struct drm_device *dev);
 extern void drm_driver_irq_preinstall(struct drm_device *dev);
 extern void drm_driver_irq_postinstall(struct drm_device *dev);
 extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
+			   struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev,
+			       void(*func)(struct drm_device *));
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+/* Modesetting support */
+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
 
 /* AGP/GART support (drm_agpsupport.h) */
@@ -1026,6 +1190,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
 
 /* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -1088,6 +1253,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_object_free(struct kref *kref);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+					    size_t size);
+void drm_gem_object_handle_free(struct kref *kref);
+
+static inline void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+	kref_get(&obj->refcount);
+}
+
+static inline void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	kref_put(&obj->refcount, drm_gem_object_free);
+}
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+			  struct drm_gem_object *obj,
+			  int *handlep);
+
+static inline void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+	drm_gem_object_reference(obj);
+	kref_get(&obj->handlecount);
+}
+
+static inline void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	drm_gem_object_unreference(obj);
+}
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+					     struct drm_file *filp,
+					     int handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
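For illustration, a sketch of how a driver is expected to hook into the reworked per-CRTC vblank code declared above: implement the three new drm_driver callbacks, call drm_vblank_init() at load time with the CRTC count, and report interrupts through drm_handle_vblank(). The foo_* identifiers stand in for device-specific register access and are assumptions, not part of any real driver.

#include "drmP.h"

/* Hypothetical device-specific helpers; stand-ins, not a real driver's API. */
extern u32 foo_read_frame_count(struct drm_device *dev, int crtc);
extern void foo_set_vblank_irq(struct drm_device *dev, int crtc, int on);
extern int foo_ack_vblank_irq(struct drm_device *dev);

static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	/* Raw hardware frame counter; wraparound is handled by the DRM core. */
	return foo_read_frame_count(dev, crtc);
}

static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	foo_set_vblank_irq(dev, crtc, 1);	/* unmask the interrupt */
	return 0;
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	foo_set_vblank_irq(dev, crtc, 0);	/* mask it again when unused */
}

static irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	int crtc = foo_ack_vblank_irq(dev);	/* which CRTC fired, or -1 */

	if (crtc >= 0)
		drm_handle_vblank(dev, crtc);	/* bump count, wake drm_wait_vblank() sleepers */
	return IRQ_HANDLED;
}

static int foo_load(struct drm_device *dev, unsigned long flags)
{
	/* Allocate the per-CRTC counters, wait queues and refcounts up front. */
	return drm_vblank_init(dev, 2 /* number of CRTCs */);
}

These hooks plug into the irq_handler, get_vblank_counter, enable_vblank and disable_vblank members of struct drm_driver shown in the hunks above. Code that needs the interrupt to stay live across an interval brackets it with drm_vblank_get()/drm_vblank_put(); once a client has issued the new modeset ioctl, the core's vblank_disable_timer may switch the interrupt off when the refcount drops to zero.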
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 135bd19499fc..da04109741e8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -84,18 +84,18 @@
 	{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -113,8 +113,10 @@
 	{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
 	{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
 	{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
-	{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
-	{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
 	{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
@@ -122,16 +124,16 @@
 	{0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
-	{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
@@ -237,6 +239,10 @@
 	{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 	{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 05c66cf03a9e..eb4b35031a55 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GET_VBLANK_PIPE	0x0e
 #define DRM_I915_VBLANK_SWAP	0x0f
 #define DRM_I915_HWS_ADDR	0x11
+#define DRM_I915_GEM_INIT	0x13
+#define DRM_I915_GEM_EXECBUFFER	0x14
+#define DRM_I915_GEM_PIN	0x15
+#define DRM_I915_GEM_UNPIN	0x16
+#define DRM_I915_GEM_BUSY	0x17
+#define DRM_I915_GEM_THROTTLE	0x18
+#define DRM_I915_GEM_ENTERVT	0x19
+#define DRM_I915_GEM_LEAVEVT	0x1a
+#define DRM_I915_GEM_CREATE	0x1b
+#define DRM_I915_GEM_PREAD	0x1c
+#define DRM_I915_GEM_PWRITE	0x1d
+#define DRM_I915_GEM_MMAP	0x1e
+#define DRM_I915_GEM_SET_DOMAIN	0x1f
+#define DRM_I915_GEM_SW_FINISH	0x20
+#define DRM_I915_GEM_SET_TILING	0x21
+#define DRM_I915_GEM_GET_TILING	0x22
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -200,6 +230,8 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_IRQ_ACTIVE            1
 #define I915_PARAM_ALLOW_BATCHBUFFER     2
 #define I915_PARAM_LAST_DISPATCH         3
+#define I915_PARAM_CHIPSET_ID            4
+#define I915_PARAM_HAS_GEM               5
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -267,4 +299,305 @@ typedef struct drm_i915_hws_addr {
 	uint64_t addr;
 } drm_i915_hws_addr_t;
 
+struct drm_i915_gem_init {
+	/**
+	 * Beginning offset in the GTT to be managed by the DRM memory
+	 * manager.
+	 */
+	uint64_t gtt_start;
+	/**
+	 * Ending offset in the GTT to be managed by the DRM memory
+	 * manager.
+	 */
+	uint64_t gtt_end;
+};
+
+struct drm_i915_gem_create {
+	/**
+	 * Requested size for the object.
+	 *
+	 * The (page-aligned) allocated size for the object will be returned.
+	 */
+	uint64_t size;
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+	/** Handle for the object being read. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to read from */
+	uint64_t offset;
+	/** Length of data to read */
+	uint64_t size;
+	/**
+	 * Pointer to write the data into.
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+	/** Handle for the object being written to. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to write to */
+	uint64_t offset;
+	/** Length of data to write */
+	uint64_t size;
+	/**
+	 * Pointer to read the data from.
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+	/** Handle for the object being mapped. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset in the object to map. */
+	uint64_t offset;
+	/**
+	 * Length of data to map.
+	 *
+	 * The value will be page-aligned.
+	 */
+	uint64_t size;
+	/**
+	 * Returned pointer the data was mapped at.
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t addr_ptr;
+};
+
+struct drm_i915_gem_set_domain {
+	/** Handle for the object */
+	uint32_t handle;
+
+	/** New read domains */
+	uint32_t read_domains;
+
+	/** New write domain */
+	uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+	/** Handle for the object */
+	uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+	/**
+	 * Handle of the buffer being pointed to by this relocation entry.
+	 *
+	 * It's appealing to make this be an index into the mm_validate_entry
+	 * list to refer to the buffer, but this allows the driver to create
+	 * a relocation list for state buffers and not re-write it per
+	 * exec using the buffer.
+	 */
+	uint32_t target_handle;
+
+	/**
+	 * Value to be added to the offset of the target buffer to make up
+	 * the relocation entry.
+	 */
+	uint32_t delta;
+
+	/** Offset in the buffer the relocation entry will be written into */
+	uint64_t offset;
+
+	/**
+	 * Offset value of the target buffer that the relocation entry was last
+	 * written as.
+	 *
+	 * If the buffer has the same offset as last time, we can skip syncing
+	 * and writing the relocation.  This value is written back out by
+	 * the execbuffer ioctl when the relocation is written.
+	 */
+	uint64_t presumed_offset;
+
+	/**
+	 * Target memory domains read by this operation.
+	 */
+	uint32_t read_domains;
+
+	/**
+	 * Target memory domains written by this operation.
+	 *
+	 * Note that only one domain may be written by the whole
+	 * execbuffer operation, so that where there are conflicts,
+	 * the application will get -EINVAL back.
+	 */
+	uint32_t write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU		0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER		0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER		0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND		0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX		0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT		0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+	/**
+	 * User's handle for a buffer to be bound into the GTT for this
+	 * operation.
+	 */
+	uint32_t handle;
+
+	/** Number of relocations to be performed on this buffer */
+	uint32_t relocation_count;
+	/**
+	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
+	 * the relocations to be performed in this buffer.
+	 */
+	uint64_t relocs_ptr;
+
+	/** Required alignment in graphics aperture */
+	uint64_t alignment;
+
+	/**
+	 * Returned value of the updated offset of the object, for future
+	 * presumed_offset writes.
+	 */
+	uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+	/**
+	 * List of buffers to be validated with their relocations to be
+	 * performend on them.
+	 *
+	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+	 *
+	 * These buffers must be listed in an order such that all relocations
+	 * a buffer is performing refer to buffers that have already appeared
+	 * in the validate list.
+	 */
+	uint64_t buffers_ptr;
+	uint32_t buffer_count;
+
+	/** Offset in the batchbuffer to start execution from. */
+	uint32_t batch_start_offset;
+	/** Bytes used in batchbuffer from batch_start_offset */
+	uint32_t batch_len;
+	uint32_t DR1;
+	uint32_t DR4;
+	uint32_t num_cliprects;
+	/** This is a struct drm_clip_rect *cliprects */
+	uint64_t cliprects_ptr;
+};
+
+struct drm_i915_gem_pin {
+	/** Handle of the buffer to be pinned. */
+	uint32_t handle;
+	uint32_t pad;
+
+	/** alignment required within the aperture */
+	uint64_t alignment;
+
+	/** Returned GTT offset of the buffer. */
+	uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+	/** Handle of the buffer to be unpinned. */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+	/** Handle of the buffer to check for busy */
+	uint32_t handle;
+
+	/** Return busy status (1 if busy, 0 if idle) */
+	uint32_t busy;
+};
+
+#define I915_TILING_NONE	0
+#define I915_TILING_X		1
+#define I915_TILING_Y		2
+
+#define I915_BIT_6_SWIZZLE_NONE		0
+#define I915_BIT_6_SWIZZLE_9		1
+#define I915_BIT_6_SWIZZLE_9_10		2
+#define I915_BIT_6_SWIZZLE_9_11		3
+#define I915_BIT_6_SWIZZLE_9_10_11	4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN	5
+
+struct drm_i915_gem_set_tiling {
+	/** Handle of the buffer to have its tiling state updated */
+	uint32_t handle;
+
+	/**
+	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+	 * I915_TILING_Y).
+	 *
+	 * This value is to be set on request, and will be updated by the
+	 * kernel on successful return with the actual chosen tiling layout.
+	 *
+	 * The tiling mode may be demoted to I915_TILING_NONE when the system
+	 * has bit 6 swizzling that can't be managed correctly by GEM.
+	 *
+	 * Buffer contents become undefined when changing tiling_mode.
+	 */
+	uint32_t tiling_mode;
+
+	/**
+	 * Stride in bytes for the object when in I915_TILING_X or
+	 * I915_TILING_Y.
+	 */
+	uint32_t stride;
+
+	/**
+	 * Returned address bit 6 swizzling required for CPU access through
+	 * mmap mapping.
+	 */
+	uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+	/** Handle of the buffer to get tiling state for. */
+	uint32_t handle;
+
+	/**
+	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+	 * I915_TILING_Y).
+	 */
+	uint32_t tiling_mode;
+
+	/**
+	 * Returned address bit 6 swizzling required for CPU access through
+	 * mmap mapping.
+	 */
+	uint32_t swizzle_mode;
+};
+
 #endif /* _I915_DRM_H_ */
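Putting the new i915 interface together, a minimal userspace sketch that allocates a GEM object, fills it with pwrite, maps it for CPU access and then drops the handle. The /dev/dri/card0 path, the libdrm-style include paths and the absence of error handling are assumptions for brevity.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	char data[4096] = "hello gem";

	/* Allocate a page-aligned buffer object; the kernel returns a handle. */
	struct drm_i915_gem_create create = { .size = sizeof(data) };
	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	/* Copy CPU data into the object without mapping it. */
	struct drm_i915_gem_pwrite pwrite = {
		.handle = create.handle,
		.offset = 0,
		.size = sizeof(data),
		.data_ptr = (uintptr_t) data,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);

	/* Map the object for direct CPU access; addr_ptr returns the pointer. */
	struct drm_i915_gem_mmap map = {
		.handle = create.handle,
		.offset = 0,
		.size = sizeof(data),
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map);

	/* Drop the handle when done; this is the generic GEM ioctl from drm.h. */
	struct drm_gem_close gem_close = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
	return 0;
}

A real client would also move the object into the appropriate domain with DRM_IOCTL_I915_GEM_SET_DOMAIN (for example I915_GEM_DOMAIN_CPU) before touching the mapping, and signal DRM_IOCTL_I915_GEM_SW_FINISH once it has finished writing through it, so the caches tracked by the read_domains/write_domain fields stay coherent.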