author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /include/drm
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/Kbuild                    2
-rw-r--r--  include/drm/drm.h                    65
-rw-r--r--  include/drm/drmP.h                  129
-rw-r--r--  include/drm/drm_buffer.h            148
-rw-r--r--  include/drm/drm_crtc.h               49
-rw-r--r--  include/drm/drm_dp_helper.h         180
-rw-r--r--  include/drm/drm_edid.h               11
-rw-r--r--  include/drm/drm_mem_util.h           65
-rw-r--r--  include/drm/drm_mm.h                 35
-rw-r--r--  include/drm/drm_mode.h               83
-rw-r--r--  include/drm/drm_os_linux.h            2
-rw-r--r--  include/drm/drm_pciids.h             38
-rw-r--r--  include/drm/i2c/ch7006.h             86
-rw-r--r--  include/drm/i915_drm.h              132
-rw-r--r--  include/drm/mga_drm.h                 2
-rw-r--r--  include/drm/nouveau_drm.h           199
-rw-r--r--  include/drm/radeon_drm.h              3
-rw-r--r--  include/drm/ttm/ttm_bo_api.h        115
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h      71
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h  107
-rw-r--r--  include/drm/ttm/ttm_lock.h          247
-rw-r--r--  include/drm/ttm/ttm_memory.h          1
-rw-r--r--  include/drm/ttm/ttm_object.h        271
-rw-r--r--  include/drm/via_drm.h                 2
-rw-r--r--  include/drm/vmwgfx_drm.h            588
25 files changed, 2446 insertions, 185 deletions
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index b940fdfa3b25..bd3a1c2fbdb4 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -7,4 +7,6 @@ unifdef-y += r128_drm.h
 unifdef-y += radeon_drm.h
 unifdef-y += sis_drm.h
 unifdef-y += savage_drm.h
+unifdef-y += vmwgfx_drm.h
 unifdef-y += via_drm.h
+unifdef-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 7cb50bdde46d..e3f46e0cb7dc 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,17 +36,27 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
+#if defined(__linux__)
+
 #include <linux/types.h>
-#include <asm/ioctl.h> /* For _IO* macros */
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID _IOC_NONE
-#define DRM_IOC_READ _IOC_READ
-#define DRM_IOC_WRITE _IOC_WRITE
-#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
 
-#define DRM_MAJOR 226
-#define DRM_MAX_MINOR 15
+#else /* One of the BSDs */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+typedef unsigned long drm_handle_t;
+
+#endif
 
 #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
@@ -59,7 +69,6 @@
 #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
-typedef unsigned int drm_handle_t;
 typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
@@ -454,6 +463,7 @@ struct drm_irq_busid {
 enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+	_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
 	_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
@@ -461,8 +471,8 @@ enum drm_vblank_seq_type {
 };
 
 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
-				_DRM_VBLANK_NEXTONMISS)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
 
 struct drm_wait_vblank_request {
 	enum drm_vblank_seq_type type;
@@ -686,6 +696,8 @@ struct drm_gem_open {
 #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
 #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
 #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
 
 /**
  * Device specific ioctls should only be in their respective headers
@@ -698,6 +710,35 @@ struct drm_gem_open {
 #define DRM_COMMAND_BASE 0x40
 #define DRM_COMMAND_END 0xA0
 
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+	__u32 type;
+	__u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+	struct drm_event base;
+	__u64 user_data;
+	__u32 tv_sec;
+	__u32 tv_usec;
+	__u32 sequence;
+	__u32 reserved;
+};
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
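For orientation, here is a minimal userspace sketch of how the event mechanism added above is meant to be consumed. It is an illustration only, not part of this commit; it assumes the header is installed as <drm/drm.h> and that drm_fd is a DRM file descriptor opened elsewhere. It relies on the documented guarantee that read() returns only complete events.

/* Illustrative only -- drains whole drm_event records from a DRM fd. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <drm/drm.h>

static void example_drain_events(int drm_fd)
{
	char buf[1024];
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	ssize_t off = 0;

	while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event ev;

		memcpy(&ev, buf + off, sizeof(ev));
		if (ev.type == DRM_EVENT_VBLANK ||
		    ev.type == DRM_EVENT_FLIP_COMPLETE) {
			struct drm_event_vblank vbl;

			memcpy(&vbl, buf + off, sizeof(vbl));
			/* vbl.user_data is the cookie passed with the ioctl */
		}
		off += ev.length;	/* length includes the header */
	}
}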
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c8e64bbadbcf..2f3b3a00b7a3 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -55,6 +55,7 @@
 #include <linux/mm.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h> /* For pte_wrprotect */
 #endif
@@ -245,16 +246,6 @@ extern void drm_ut_debug_printk(unsigned int request_level,
 
 #endif
 
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-
-#define DRM_PROC_PRINT(fmt, arg...) \
-   len += sprintf(&buf[len], fmt , ##arg); \
-   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
-
-#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
-   len += sprintf(&buf[len], fmt , ##arg); \
-   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
-
 /*@}*/
 
 /***********************************************************************/
@@ -265,19 +256,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
 
 #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
 #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
 
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * Get the private SAREA mapping.
- *
- * \param _dev DRM device.
- * \param _ctx context number.
- * \param _map output mapping.
- */
-#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
-	(_map) = (_dev)->context_sareas[_ctx]; \
-} while(0)
 
 /**
  * Test that the hardware lock is held by the caller, returning otherwise.
@@ -297,18 +277,6 @@ do { \
 } while (0)
 
 /**
- * Copy and IOCTL return string to user space
- */
-#define DRM_COPY( name, value ) \
-	len = strlen( value ); \
-	if ( len > name##_len ) len = name##_len; \
-	name##_len = strlen( value ); \
-	if ( len && name ) { \
-		if ( copy_to_user( name, value, len ) ) \
-			return -EFAULT; \
-	}
-
-/**
  * Ioctl function type.
 *
 * \param inode device inode.
@@ -322,10 +290,14 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
			       unsigned long arg);
 
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_MAJOR 226
+
 #define DRM_AUTH 0x1
 #define DRM_MASTER 0x2
 #define DRM_ROOT_ONLY 0x4
 #define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
 
 struct drm_ioctl_desc {
	unsigned int cmd;
@@ -426,6 +398,14 @@ struct drm_buf_entry {
	struct drm_freelist freelist;
 };
 
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+	struct drm_event *event;
+	struct list_head link;
+	struct drm_file *file_priv;
+	void (*destroy)(struct drm_pending_event *event);
+};
+
 /** File private data */
 struct drm_file {
	int authenticated;
@@ -449,6 +429,10 @@ struct drm_file {
	struct drm_master *master; /* master this node is currently associated with
				      N.B. not always minor->master */
	struct list_head fbs;
+
+	wait_queue_head_t event_wait;
+	struct list_head event_list;
+	int event_space;
 };
 
 /** Wait queue */
@@ -795,6 +779,15 @@ struct drm_driver {
	/* Master routines */
	int (*master_create)(struct drm_device *dev, struct drm_master *master);
	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+	/**
+	 * master_set is called whenever the minor master is set.
+	 * master_drop is called whenever the minor master is dropped.
+	 */
+
+	int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+			  bool from_open);
+	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+			    bool from_release);
 
	int (*proc_init)(struct drm_minor *minor);
	void (*proc_cleanup)(struct drm_minor *minor);
@@ -809,6 +802,7 @@ struct drm_driver {
	 */
	int (*gem_init_object) (struct drm_gem_object *obj);
	void (*gem_free_object) (struct drm_gem_object *obj);
+	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
	/* vga arb irq handler */
	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -900,6 +894,12 @@ struct drm_minor {
	struct drm_mode_group mode_group;
 };
 
+struct drm_pending_vblank_event {
+	struct drm_pending_event base;
+	int pipe;
+	struct drm_event_vblank event;
+};
+
 /**
 * DRM device structure. This structure represent a complete card that
 * may contain multiple heads.
@@ -999,6 +999,12 @@ struct drm_device {
 
	u32 max_vblank_count; /**< size of vblank counter register */
 
+	/**
+	 * List of events
+	 */
+	struct list_head vblank_event_list;
+	spinlock_t event_lock;
+
	/*@} */
	cycles_t ctx_start;
	cycles_t lck_start;
@@ -1125,8 +1131,8 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
	/* Driver support (drm_drv.h) */
 extern int drm_init(struct drm_driver *driver);
 extern void drm_exit(struct drm_driver *driver);
-extern int drm_ioctl(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg);
+extern long drm_ioctl(struct file *filp,
		      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
			     unsigned int cmd, unsigned long arg);
 extern int drm_lastclose(struct drm_device *dev);
@@ -1135,6 +1141,8 @@ extern int drm_lastclose(struct drm_device *dev);
 extern int drm_open(struct inode *inode, struct file *filp);
 extern int drm_stub_open(struct inode *inode, struct file *filp);
 extern int drm_fasync(int fd, struct file *filp, int on);
+extern ssize_t drm_read(struct file *filp, char __user *buffer,
+			size_t count, loff_t *offset);
 extern int drm_release(struct inode *inode, struct file *filp);
 
	/* Mapping support (drm_vm.h) */
@@ -1295,6 +1303,7 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, int crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
@@ -1401,7 +1410,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
				       struct drm_ati_pcigart_info * gart_info);
 
 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
-				       size_t align, dma_addr_t maxaddr);
+				       size_t align);
 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 
@@ -1420,6 +1429,7 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_free(struct kref *kref);
+void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
					    size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
@@ -1436,10 +1446,15 @@ drm_gem_object_reference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference(struct drm_gem_object *obj)
 {
-	if (obj == NULL)
-		return;
+	if (obj != NULL)
+		kref_put(&obj->refcount, drm_gem_object_free);
+}
 
-	kref_put(&obj->refcount, drm_gem_object_free);
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (obj != NULL)
+		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1468,6 +1483,21 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
	drm_gem_object_unreference(obj);
 }
 
+static inline void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	drm_gem_object_unreference_unlocked(obj);
+}
+
 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
					     struct drm_file *filp,
					     u32 handle);
@@ -1516,26 +1546,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
 {
 }
 
-
-static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
-{
-	if (size * nmemb <= PAGE_SIZE)
-	    return kcalloc(nmemb, size, GFP_KERNEL);
-
-	if (size != 0 && nmemb > ULONG_MAX / size)
-		return NULL;
-
-	return __vmalloc(size * nmemb,
-			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-}
-
-static __inline void drm_free_large(void *ptr)
-{
-	if (!is_vmalloc_addr(ptr))
-		return kfree(ptr);
-
-	vfree(ptr);
-}
+#include "drm_mem_util.h"
 /*@}*/
 
 #endif /* __KERNEL__ */
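As a rough illustration of the new unlocked GEM reference helpers declared above (a sketch, not code from this commit), a driver that previously had to take dev->struct_mutex just to drop a reference can now use the *_unlocked variant:

/* Illustrative sketch only. */
static void example_put_gem_object(struct drm_gem_object *obj)
{
	/*
	 * Previously the caller was expected to hold
	 * obj->dev->struct_mutex around drm_gem_object_unreference().
	 * The *_unlocked variant, as the name suggests, may be called
	 * without the caller holding that mutex.
	 */
	drm_gem_object_unreference_unlocked(obj);
}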
diff --git a/include/drm/drm_buffer.h b/include/drm/drm_buffer.h
new file mode 100644
index 000000000000..322dbff3f861
--- /dev/null
+++ b/include/drm/drm_buffer.h
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for coping data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#ifndef _DRM_BUFFER_H_
+#define _DRM_BUFFER_H_
+
+#include "drmP.h"
+
+struct drm_buffer {
+	int iterator;
+	int size;
+	char *data[];
+};
+
+
+/**
+ * Return the index of page that buffer is currently pointing at.
+ */
+static inline int drm_buffer_page(struct drm_buffer *buf)
+{
+	return buf->iterator / PAGE_SIZE;
+}
+/**
+ * Return the index of the current byte in the page
+ */
+static inline int drm_buffer_index(struct drm_buffer *buf)
+{
+	return buf->iterator & (PAGE_SIZE - 1);
+}
+/**
+ * Return number of bytes that is left to process
+ */
+static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
+{
+	return buf->size - buf->iterator;
+}
+
+/**
+ * Advance the buffer iterator number of bytes that is given.
+ */
+static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
+{
+	buf->iterator += bytes;
+}
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: A pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
+
+/**
+ * Copy the user data to the begin of the buffer and reset the processing
+ * iterator.
+ *
+ * user_data: A pointer the data that is copied to the buffer.
+ * size: The Number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+				     void __user *user_data, int size);
+
+/**
+ * Free the drm buffer object
+ */
+extern void drm_buffer_free(struct drm_buffer *buf);
+
+/**
+ * Read an object from buffer that may be split to multiple parts. If object
+ * is not split function just returns the pointer to object in buffer. But in
+ * case of split object data is copied to given stack object that is suplied
+ * by caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the objet in bytes.
+ * stack_obj: A pointer to a memory location where object can be copied.
+ */
+extern void *drm_buffer_read_object(struct drm_buffer *buf,
+				    int objsize, void *stack_obj);
+
+/**
+ * Returns the pointer to the dword which is offset number of elements from the
+ * current processing location.
+ *
+ * Caller must make sure that dword is not split in the buffer. This
+ * requirement is easily met if all the sizes of objects in buffer are
+ * multiples of dword and PAGE_SIZE is multiple dword.
+ *
+ * Call to this function doesn't change the processing location.
+ *
+ * offset: The index of the dword relative to the internat iterator.
+ */
+static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
+						int offset)
+{
+	int iter = buffer->iterator + offset * 4;
+	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+/**
+ * Returns the pointer to the dword which is offset number of elements from
+ * the current processing location.
+ *
+ * Call to this function doesn't change the processing location.
+ *
+ * offset: The index of the byte relative to the internat iterator.
+ */
+static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
+					       int offset)
+{
+	int iter = buffer->iterator + offset;
+	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+
+#endif
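A hypothetical usage sketch for the drm_buffer API above (not part of this commit; the command decoding is a placeholder): copy a user-space command stream into a multipart buffer and walk it one dword at a time.

/* Illustrative sketch only. */
static int example_parse_cmdbuf(void __user *user_data, int size)
{
	struct drm_buffer *buf;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, user_data, size);
	if (ret)
		goto out;

	while (drm_buffer_unprocessed(buf) >= 4) {
		u32 *cmd = drm_buffer_pointer_to_dword(buf, 0);
		/* ... decode *cmd here ... */
		drm_buffer_advance(buf, 4);
	}
out:
	drm_buffer_free(buf);
	return ret;
}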
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b69347b8904f..1347524a8e30 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -123,7 +123,7 @@ struct drm_display_mode {
 	int type;
 
 	/* Proposed mode values */
-	int clock;
+	int clock;		/* in kHz */
 	int hdisplay;
 	int hsync_start;
 	int hsync_end;
@@ -164,8 +164,8 @@ struct drm_display_mode {
 	int *private;
 	int private_flags;
 
-	int vrefresh;
-	float hsync;
+	int vrefresh;		/* in Hz */
+	int hsync;		/* in kHz */
 };
 
 enum drm_connector_status {
@@ -242,6 +242,21 @@ struct drm_framebuffer_funcs {
 	int (*create_handle)(struct drm_framebuffer *fb,
			     struct drm_file *file_priv,
			     unsigned int *handle);
+	/**
+	 * Optinal callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that a area of the framebuffer has changed and should
+	 * be flushed to the display hardware.
+	 *
+	 * See documentation in drm_mode.h for the struct
+	 * drm_mode_fb_dirty_cmd for more information as all
+	 * the semantics and arguments have a one to one mapping
+	 * on this function.
+	 */
+	int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+		     unsigned color, struct drm_clip_rect *clips,
+		     unsigned num_clips);
 };
 
 struct drm_framebuffer {
@@ -256,7 +271,7 @@ struct drm_framebuffer {
 	unsigned int depth;
 	int bits_per_pixel;
 	int flags;
-	void *fbdev;
+	struct fb_info *fbdev;
 	u32 pseudo_palette[17];
 	struct list_head filp_head;
 	/* if you are using the helper */
@@ -290,6 +305,7 @@ struct drm_property {
 struct drm_crtc;
 struct drm_connector;
 struct drm_encoder;
+struct drm_pending_vblank_event;
 
 /**
 * drm_crtc_funcs - control CRTCs for a given device
@@ -333,6 +349,19 @@ struct drm_crtc_funcs {
 	void (*destroy)(struct drm_crtc *crtc);
 
 	int (*set_config)(struct drm_mode_set *set);
+
+	/*
+	 * Flip to the given framebuffer. This implements the page
+	 * flip ioctl descibed in drm_mode.h, specifically, the
+	 * implementation must return immediately and block all
+	 * rendering to the current fb until the flip has completed.
+	 * If userspace set the event flag in the ioctl, the event
+	 * argument will point to an event to send back when the flip
+	 * completes, otherwise it will be NULL.
+	 */
+	int (*page_flip)(struct drm_crtc *crtc,
+			 struct drm_framebuffer *fb,
+			 struct drm_pending_vblank_event *event);
 };
 
 /**
@@ -596,6 +625,7 @@ struct drm_mode_config {
 	/* Optional properties */
 	struct drm_property *scaling_mode_property;
 	struct drm_property *dithering_mode_property;
+	struct drm_property *dirty_info_property;
 };
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev,
 extern void drm_mode_prune_invalid(struct drm_device *dev,
				   struct list_head *mode_list, bool verbose);
 extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(struct drm_display_mode *mode);
 extern int drm_mode_vrefresh(struct drm_display_mode *mode);
 extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
				  int adjust_flags);
@@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
				    char *formats[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
 extern char *drm_get_encoder_name(struct drm_encoder *encoder);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -711,7 +743,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
					      struct drm_encoder *encoder);
 extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
					 int gamma_size);
-extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type);
 /* IOCTLs */
 extern int drm_mode_getresources(struct drm_device *dev,
				 void *data, struct drm_file *file_priv);
@@ -730,6 +763,8 @@ extern int drm_mode_rmfb(struct drm_device *dev,
			 void *data, struct drm_file *file_priv);
 extern int drm_mode_getfb(struct drm_device *dev,
			  void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+				  void *data, struct drm_file *file_priv);
 extern int drm_mode_addmode_ioctl(struct drm_device *dev,
				  void *data, struct drm_file *file_priv);
 extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
@@ -756,6 +791,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
				    void *data, struct drm_file *file_priv);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+				    void *data, struct drm_file *file_priv);
 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
				int hdisplay, int vdisplay, int vrefresh,
				bool reduced, bool interlaced, bool margins);
@@ -764,4 +801,6 @@ extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
				bool interlaced, int margins);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
				int hdisplay, int vdisplay);
+
+extern bool drm_edid_is_valid(struct edid *edid);
 #endif /* __DRM_CRTC_H__ */
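To illustrate where the two new hooks plug in, here is a sketch of a KMS driver's funcs tables. It is not taken from any driver in this commit; all example_* symbols are placeholders, and drm_crtc_helper_set_config/drm_crtc_cleanup are assumed to be available from the existing CRTC helpers.

/* Illustrative sketch only -- example_* functions are hypothetical. */
static int example_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  struct drm_pending_vblank_event *event);
static int example_fb_dirty(struct drm_framebuffer *fb, unsigned flags,
			    unsigned color, struct drm_clip_rect *clips,
			    unsigned num_clips);

static const struct drm_crtc_funcs example_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.page_flip = example_crtc_page_flip,	/* queue flip, send event on completion */
	.destroy = drm_crtc_cleanup,
};

static const struct drm_framebuffer_funcs example_fb_funcs = {
	.dirty = example_fb_dirty,		/* flush the reported clip rectangles */
	/* .destroy and .create_handle omitted in this sketch */
};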
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
new file mode 100644
index 000000000000..a49e791db0b0
--- /dev/null
+++ b/include/drm/drm_dp_helper.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+/* From the VESA DisplayPort spec */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+			    DP_LANE_CHANNEL_EQ_DONE | \
+			    DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+struct i2c_algo_dp_aux_data {
+	bool running;
+	u16 address;
+	int (*aux_ch) (struct i2c_adapter *adapter,
+		       int mode, uint8_t write_byte,
+		       uint8_t *read_byte);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#endif /* _DRM_DP_HELPER_H_ */
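For context on how this helper is intended to be used, here is a sketch (not from this commit; example_* names are hypothetical, and the usual <linux/i2c.h> definitions are assumed): the driver implements the aux_ch hook, attaches the algo data to an i2c_adapter, and registers the adapter with i2c_dp_aux_add_bus().

/* Illustrative sketch only. */
static int example_aux_ch(struct i2c_adapter *adapter, int mode,
			  uint8_t write_byte, uint8_t *read_byte)
{
	/* Issue one AUX transaction; mode is MODE_I2C_START/WRITE/READ/STOP. */
	return 0;
}

static struct i2c_algo_dp_aux_data example_aux_data = {
	.running = false,
	.address = 0,
	.aux_ch = example_aux_ch,
};

static int example_register_dp_i2c(struct i2c_adapter *adapter)
{
	adapter->algo_data = &example_aux_data;
	return i2c_dp_aux_add_bus(adapter);
}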
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7d6c9a2dfcbb..b4209898f115 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -106,6 +106,10 @@ struct detailed_data_color_point {
 	u8 wpindex2[3];
 } __attribute__((packed));
 
+struct cvt_timing {
+	u8 code[3];
+} __attribute__((packed));
+
 struct detailed_non_pixel {
 	u8 pad1;
 	u8 type;	/* ff=serial, fe=string, fd=monitor range, fc=monitor name
@@ -117,9 +121,13 @@ struct detailed_non_pixel {
 		struct detailed_data_monitor_range range;
 		struct detailed_data_wpindex color;
 		struct std_timing timings[5];
+		struct cvt_timing cvt[4];
 	} data;
 } __attribute__((packed));
 
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
 #define EDID_DETAIL_STD_MODES 0xfa
 #define EDID_DETAIL_MONITOR_CPDATA 0xfb
 #define EDID_DETAIL_MONITOR_NAME 0xfc
@@ -193,4 +201,7 @@ struct edid {
 
 #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
 
+/* define the number of Extension EDID block */
+#define DRM_MAX_EDID_EXT_NUM 4
+
 #endif /* __DRM_EDID_H__ */
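A sketch of how the new descriptor-type codes might be consumed (illustrative only, not from this commit; it assumes the struct edid/detailed_timing layout already defined in this header, where a zero pixel clock marks a non-timing data block):

/* Illustrative sketch only. */
static void example_scan_descriptors(struct edid *edid)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct detailed_timing *dt = &edid->detailed_timings[i];

		if (dt->pixel_clock != 0)
			continue;	/* a real detailed mode, not a data block */

		switch (dt->data.other_data.type) {
		case EDID_DETAIL_CVT_3BYTE:
			/* dt->data.other_data.data.cvt[] now has a type */
			break;
		case EDID_DETAIL_EST_TIMINGS:
		case EDID_DETAIL_COLOR_MGMT_DATA:
			break;
		}
	}
}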
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
new file mode 100644
index 000000000000..6bd325fedc87
--- /dev/null
+++ b/include/drm/drm_mem_util.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+#ifndef _DRM_MEM_UTIL_H_
+#define _DRM_MEM_UTIL_H_
+
+#include <linux/vmalloc.h>
+
+static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+{
+	if (size != 0 && nmemb > ULONG_MAX / size)
+		return NULL;
+
+	if (size * nmemb <= PAGE_SIZE)
+	    return kcalloc(nmemb, size, GFP_KERNEL);
+
+	return __vmalloc(size * nmemb,
+			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
+	if (size != 0 && nmemb > ULONG_MAX / size)
+		return NULL;
+
+	if (size * nmemb <= PAGE_SIZE)
+	    return kmalloc(nmemb * size, GFP_KERNEL);
+
+	return __vmalloc(size * nmemb,
+			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
+static __inline void drm_free_large(void *ptr)
+{
+	if (!is_vmalloc_addr(ptr))
+		return kfree(ptr);
+
+	vfree(ptr);
+}
+
+#endif
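A brief usage sketch for the helpers now living in this header (illustrative, not part of this commit): allocating and freeing a page-pointer array that may be larger than PAGE_SIZE.

/* Illustrative sketch only. */
static struct page **example_alloc_page_list(int npages)
{
	struct page **pages;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return NULL;
	/* ... fill in the array ... */
	return pages;
}

static void example_free_page_list(struct page **pages)
{
	drm_free_large(pages);	/* handles both kmalloc and vmalloc cases */
}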
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 62329f9a42cb..4c10be39a43b 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
 						    int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+						struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic);
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
 						   unsigned long size,
 						   unsigned alignment)
@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
 {
 	return drm_mm_get_block_generic(parent, size, alignment, 1);
 }
+static inline struct drm_mm_node *drm_mm_get_block_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment,
+						start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment,
+						start, end, 1);
+}
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 					      unsigned long size,
 					      unsigned alignment,
 					      int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+						const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match);
 extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
 		       unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
 	return block->mm;
 }
 
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
 #endif
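The range-restricted allocators added above pair naturally: search for a hole inside [start, end) and then claim it. A hypothetical sketch (not part of this commit):

/* Illustrative sketch only. */
static struct drm_mm_node *example_alloc_in_range(struct drm_mm *mm,
						  unsigned long size,
						  unsigned alignment,
						  unsigned long start,
						  unsigned long end)
{
	struct drm_mm_node *node;

	node = drm_mm_search_free_in_range(mm, size, alignment,
					   start, end, 0);
	if (node == NULL)
		return NULL;

	return drm_mm_get_block_range(node, size, alignment, start, end);
}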
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 1f908416aedb..c5ba1636613c 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,9 +27,6 @@
 #ifndef _DRM_MODE_H
 #define _DRM_MODE_H
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-
 #define DRM_DISPLAY_INFO_LEN 32
 #define DRM_CONNECTOR_NAME_LEN 32
 #define DRM_DISPLAY_MODE_LEN 32
@@ -78,12 +75,17 @@
 #define DRM_MODE_DITHERING_OFF 0
 #define DRM_MODE_DITHERING_ON 1
 
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
 struct drm_mode_modeinfo {
 	__u32 clock;
 	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
 	__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
-	__u32 vrefresh; /* vertical refresh * 1000 */
+	__u32 vrefresh;
 
 	__u32 flags;
 	__u32 type;
@@ -158,6 +160,7 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_HDMIA 11
 #define DRM_MODE_CONNECTOR_HDMIB 12
 #define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
 
 struct drm_mode_get_connector {
 
@@ -225,6 +228,45 @@ struct drm_mode_fb_cmd {
 	__u32 handle;
 };
 
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * as a hardware or software draw to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more then just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates, the annotates are a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions are half of num_clips given,
+ * where the clip rects are paired in src and dst. The width and
+ * height of each one of the pairs must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
+ * promises that the region specified of the clip rects is filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+	__u32 fb_id;
+	__u32 flags;
+	__u32 color;
+	__u32 num_clips;
+	__u64 clips_ptr;
+};
+
 struct drm_mode_mode_cmd {
 	__u32 connector_id;
 	struct drm_mode_modeinfo mode;
@@ -268,4 +310,37 @@ struct drm_mode_crtc_lut {
 	__u64 blue;
 };
 
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm sends back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done. The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+	__u32 crtc_id;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 reserved;
+	__u64 user_data;
+};
+
 #endif
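A minimal userspace sketch of the new page-flip ioctl (illustrative only, not part of this commit; fd, crtc_id and fb_id are assumed to come from earlier KMS setup, and the headers are assumed to be installed as <drm/drm.h> and <drm/drm_mode.h>):

/* Illustrative sketch only. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int example_queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
			      uint64_t cookie)
{
	struct drm_mode_crtc_page_flip flip;

	memset(&flip, 0, sizeof(flip));
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;	/* deliver a drm_event_vblank */
	flip.user_data = cookie;		/* echoed back in the event */

	/* Fails with EBUSY if a flip is already pending on this crtc. */
	return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}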
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 26641e95e0a4..393369147a2d 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -123,5 +123,5 @@ do { \
 	remove_wait_queue(&(queue), &entry); \
 } while (0)
 
-#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_WAKEUP( queue ) wake_up( queue )
 #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e6f3b120f51a..2d428b088cc8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
@@ -141,6 +142,41 @@
 	{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -375,6 +411,7 @@
 	{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
@@ -558,4 +595,5 @@
 	{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0, 0, 0}
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
new file mode 100644
index 000000000000..8390b437a1f8
--- /dev/null
+++ b/include/drm/i2c/ch7006.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __DRM_I2C_CH7006_H__
28#define __DRM_I2C_CH7006_H__
29
30/**
31 * struct ch7006_encoder_params
32 *
33 * Describes how the ch7006 is wired up with the GPU. It should be
34 * used as the @params parameter of its @set_config method.
35 *
 36 * See "http://www.chrontel.com/pdf/7006.pdf" for the precise
 37 * meaning of these fields.
38 */
39struct ch7006_encoder_params {
40 enum {
41 CH7006_FORMAT_RGB16 = 0,
42 CH7006_FORMAT_YCrCb24m16,
43 CH7006_FORMAT_RGB24m16,
44 CH7006_FORMAT_RGB15,
45 CH7006_FORMAT_RGB24m12C,
46 CH7006_FORMAT_RGB24m12I,
47 CH7006_FORMAT_RGB24m8,
48 CH7006_FORMAT_RGB16m8,
49 CH7006_FORMAT_RGB15m8,
50 CH7006_FORMAT_YCrCb24m8,
51 } input_format;
52
53 enum {
54 CH7006_CLOCK_SLAVE = 0,
55 CH7006_CLOCK_MASTER,
56 } clock_mode;
57
58 enum {
59 CH7006_CLOCK_EDGE_NEG = 0,
60 CH7006_CLOCK_EDGE_POS,
61 } clock_edge;
62
63 int xcm, pcm;
64
65 enum {
66 CH7006_SYNC_SLAVE = 0,
67 CH7006_SYNC_MASTER,
68 } sync_direction;
69
70 enum {
71 CH7006_SYNC_SEPARATED = 0,
72 CH7006_SYNC_EMBEDDED,
73 } sync_encoding;
74
75 enum {
76 CH7006_POUT_1_8V = 0,
77 CH7006_POUT_3_3V,
78 } pout_level;
79
80 enum {
81 CH7006_ACTIVE_HSYNC = 0,
82 CH7006_ACTIVE_DSTART,
83 } active_detect;
84};
85
86#endif
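A configuration structure like the one above is typically filled out by the GPU driver and handed to the encoder's set_config hook. The following is an illustrative sketch only: the demo_* name is hypothetical, and the assumption that set_config receives the structure through a void *params argument (as drm_encoder_slave drivers do) is not part of this header.

#include "drm/i2c/ch7006.h"

/* Hypothetical wiring: CH7006 fed 16-bit RGB, driven as clock master,
 * separate sync signals, conservative defaults elsewhere. */
static const struct ch7006_encoder_params demo_ch7006_params = {
	.input_format	= CH7006_FORMAT_RGB16,
	.clock_mode	= CH7006_CLOCK_MASTER,
	.clock_edge	= CH7006_CLOCK_EDGE_NEG,
	.xcm		= 0,
	.pcm		= 0,
	.sync_direction	= CH7006_SYNC_SLAVE,
	.sync_encoding	= CH7006_SYNC_SEPARATED,
	.pout_level	= CH7006_POUT_1_8V,
	.active_detect	= CH7006_ACTIVE_HSYNC,
};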
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7e0cb1da92e6..b64a8d7cdf6d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -27,11 +27,11 @@
27#ifndef _I915_DRM_H_ 27#ifndef _I915_DRM_H_
28#define _I915_DRM_H_ 28#define _I915_DRM_H_
29 29
30#include "drm.h"
31
30/* Please note that modifications to all structs defined here are 32/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 33 * subject to backwards-compatibility constraints.
32 */ 34 */
33#include <linux/types.h>
34#include "drm.h"
35 35
36/* Each region is a minimum of 16k, and there are at most 255 of them. 36/* Each region is a minimum of 16k, and there are at most 255 of them.
37 */ 37 */
@@ -186,6 +186,9 @@ typedef struct _drm_i915_sarea {
186#define DRM_I915_GEM_MMAP_GTT 0x24 186#define DRM_I915_GEM_MMAP_GTT 0x24
187#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 187#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
188#define DRM_I915_GEM_MADVISE 0x26 188#define DRM_I915_GEM_MADVISE 0x26
189#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
190#define DRM_I915_OVERLAY_ATTRS 0x28
191#define DRM_I915_GEM_EXECBUFFER2 0x29
189 192
190#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 193#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
191#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 194#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -205,6 +208,7 @@ typedef struct _drm_i915_sarea {
205#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 208#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
206#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 209#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
207#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 210#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
211#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
208#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 212#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
209#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 213#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
210#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 214#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -221,8 +225,10 @@ typedef struct _drm_i915_sarea {
221#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 225#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
222#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 226#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
223#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 227#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
224#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) 228#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
225#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 229#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
230#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
231#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
226 232
227/* Allow drivers to submit batchbuffers directly to hardware, relying 233/* Allow drivers to submit batchbuffers directly to hardware, relying
228 * on the security mechanisms provided by hardware. 234 * on the security mechanisms provided by hardware.
@@ -266,6 +272,9 @@ typedef struct drm_i915_irq_wait {
266#define I915_PARAM_CHIPSET_ID 4 272#define I915_PARAM_CHIPSET_ID 4
267#define I915_PARAM_HAS_GEM 5 273#define I915_PARAM_HAS_GEM 5
268#define I915_PARAM_NUM_FENCES_AVAIL 6 274#define I915_PARAM_NUM_FENCES_AVAIL 6
275#define I915_PARAM_HAS_OVERLAY 7
276#define I915_PARAM_HAS_PAGEFLIPPING 8
277#define I915_PARAM_HAS_EXECBUF2 9
269 278
270typedef struct drm_i915_getparam { 279typedef struct drm_i915_getparam {
271 int param; 280 int param;
@@ -561,6 +570,57 @@ struct drm_i915_gem_execbuffer {
561 __u64 cliprects_ptr; 570 __u64 cliprects_ptr;
562}; 571};
563 572
573struct drm_i915_gem_exec_object2 {
574 /**
575 * User's handle for a buffer to be bound into the GTT for this
576 * operation.
577 */
578 __u32 handle;
579
580 /** Number of relocations to be performed on this buffer */
581 __u32 relocation_count;
582 /**
583 * Pointer to array of struct drm_i915_gem_relocation_entry containing
584 * the relocations to be performed in this buffer.
585 */
586 __u64 relocs_ptr;
587
588 /** Required alignment in graphics aperture */
589 __u64 alignment;
590
591 /**
592 * Returned value of the updated offset of the object, for future
593 * presumed_offset writes.
594 */
595 __u64 offset;
596
597#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
598 __u64 flags;
599 __u64 rsvd1;
600 __u64 rsvd2;
601};
602
603struct drm_i915_gem_execbuffer2 {
604 /**
605 * List of gem_exec_object2 structs
606 */
607 __u64 buffers_ptr;
608 __u32 buffer_count;
609
610 /** Offset in the batchbuffer to start execution from. */
611 __u32 batch_start_offset;
612 /** Bytes used in batchbuffer from batch_start_offset */
613 __u32 batch_len;
614 __u32 DR1;
615 __u32 DR4;
616 __u32 num_cliprects;
617 /** This is a struct drm_clip_rect *cliprects */
618 __u64 cliprects_ptr;
619 __u64 flags; /* currently unused */
620 __u64 rsvd1;
621 __u64 rsvd2;
622};
623
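As a rough illustration of how the new execbuffer2 structures are meant to be driven from user space (a sketch under assumptions: an already-open DRM file descriptor, GEM handles and relocation arrays prepared elsewhere; submit_batch is a hypothetical helper, not a libdrm function):

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int submit_batch(int fd, struct drm_i915_gem_exec_object2 *objects,
			__u32 count, __u32 batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (__u64)(unsigned long)objects;
	execbuf.buffer_count = count;		/* batch buffer last, by convention */
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;
	execbuf.flags = 0;			/* currently unused */

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}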
564struct drm_i915_gem_pin { 624struct drm_i915_gem_pin {
565 /** Handle of the buffer to be pinned. */ 625 /** Handle of the buffer to be pinned. */
566 __u32 handle; 626 __u32 handle;
@@ -686,4 +746,70 @@ struct drm_i915_gem_madvise {
686 __u32 retained; 746 __u32 retained;
687}; 747};
688 748
749/* flags */
750#define I915_OVERLAY_TYPE_MASK 0xff
751#define I915_OVERLAY_YUV_PLANAR 0x01
752#define I915_OVERLAY_YUV_PACKED 0x02
753#define I915_OVERLAY_RGB 0x03
754
755#define I915_OVERLAY_DEPTH_MASK 0xff00
756#define I915_OVERLAY_RGB24 0x1000
757#define I915_OVERLAY_RGB16 0x2000
758#define I915_OVERLAY_RGB15 0x3000
759#define I915_OVERLAY_YUV422 0x0100
760#define I915_OVERLAY_YUV411 0x0200
761#define I915_OVERLAY_YUV420 0x0300
762#define I915_OVERLAY_YUV410 0x0400
763
764#define I915_OVERLAY_SWAP_MASK 0xff0000
765#define I915_OVERLAY_NO_SWAP 0x000000
766#define I915_OVERLAY_UV_SWAP 0x010000
767#define I915_OVERLAY_Y_SWAP 0x020000
768#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
769
770#define I915_OVERLAY_FLAGS_MASK 0xff000000
771#define I915_OVERLAY_ENABLE 0x01000000
772
773struct drm_intel_overlay_put_image {
774 /* various flags and src format description */
775 __u32 flags;
776 /* source picture description */
777 __u32 bo_handle;
778 /* stride values and offsets are in bytes, buffer relative */
779 __u16 stride_Y; /* stride for packed formats */
780 __u16 stride_UV;
781	__u32 offset_Y; /* offset for packed formats */
782 __u32 offset_U;
783 __u32 offset_V;
784 /* in pixels */
785 __u16 src_width;
786 __u16 src_height;
787 /* to compensate the scaling factors for partially covered surfaces */
788 __u16 src_scan_width;
789 __u16 src_scan_height;
790 /* output crtc description */
791 __u32 crtc_id;
792 __u16 dst_x;
793 __u16 dst_y;
794 __u16 dst_width;
795 __u16 dst_height;
796};
797
798/* flags */
799#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
800#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
801struct drm_intel_overlay_attrs {
802 __u32 flags;
803 __u32 color_key;
804 __s32 brightness;
805 __u32 contrast;
806 __u32 saturation;
807 __u32 gamma0;
808 __u32 gamma1;
809 __u32 gamma2;
810 __u32 gamma3;
811 __u32 gamma4;
812 __u32 gamma5;
813};
814
689#endif /* _I915_DRM_H_ */ 815#endif /* _I915_DRM_H_ */
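The overlay interface added above can be exercised along these lines from user space (illustrative only: set_overlay_color_key is a hypothetical helper, and the read-modify-write pattern is an assumption about how the attrs ioctl is intended to be used):

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int set_overlay_color_key(int fd, __u32 key)
{
	struct drm_intel_overlay_attrs attrs;

	/* fetch the current attributes so unrelated fields keep their values */
	memset(&attrs, 0, sizeof(attrs));
	if (ioctl(fd, DRM_IOCTL_I915_OVERLAY_ATTRS, &attrs))
		return -1;

	attrs.flags = I915_OVERLAY_UPDATE_ATTRS;
	attrs.color_key = key;
	return ioctl(fd, DRM_IOCTL_I915_OVERLAY_ATTRS, &attrs);
}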
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 325fd6fb4a42..3ffbc4798afa 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -35,7 +35,7 @@
35#ifndef __MGA_DRM_H__ 35#ifndef __MGA_DRM_H__
36#define __MGA_DRM_H__ 36#define __MGA_DRM_H__
37 37
38#include <linux/types.h> 38#include "drm.h"
39 39
40/* WARNING: If you change any of these defines, make sure to change the 40/* WARNING: If you change any of these defines, make sure to change the
41 * defines in the Xserver file (mga_sarea.h) 41 * defines in the Xserver file (mga_sarea.h)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
new file mode 100644
index 000000000000..a6a9f4af5ebd
--- /dev/null
+++ b/include/drm/nouveau_drm.h
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRM_H__
26#define __NOUVEAU_DRM_H__
27
28#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
29
30struct drm_nouveau_channel_alloc {
31 uint32_t fb_ctxdma_handle;
32 uint32_t tt_ctxdma_handle;
33
34 int channel;
35 uint32_t pushbuf_domains;
36
37 /* Notifier memory */
38 uint32_t notifier_handle;
39
40 /* DRM-enforced subchannel assignments */
41 struct {
42 uint32_t handle;
43 uint32_t grclass;
44 } subchan[8];
45 uint32_t nr_subchan;
46};
47
48struct drm_nouveau_channel_free {
49 int channel;
50};
51
52struct drm_nouveau_grobj_alloc {
53 int channel;
54 uint32_t handle;
55 int class;
56};
57
58struct drm_nouveau_notifierobj_alloc {
59 uint32_t channel;
60 uint32_t handle;
61 uint32_t size;
62 uint32_t offset;
63};
64
65struct drm_nouveau_gpuobj_free {
66 int channel;
67 uint32_t handle;
68};
69
70/* FIXME : maybe unify {GET,SET}PARAMs */
71#define NOUVEAU_GETPARAM_PCI_VENDOR 3
72#define NOUVEAU_GETPARAM_PCI_DEVICE 4
73#define NOUVEAU_GETPARAM_BUS_TYPE 5
74#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
75#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
76#define NOUVEAU_GETPARAM_FB_SIZE 8
77#define NOUVEAU_GETPARAM_AGP_SIZE 9
78#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
79#define NOUVEAU_GETPARAM_CHIPSET_ID 11
80#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
81#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
82struct drm_nouveau_getparam {
83 uint64_t param;
84 uint64_t value;
85};
86
87struct drm_nouveau_setparam {
88 uint64_t param;
89 uint64_t value;
90};
91
92#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
93#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
94#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
95#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
96
97struct drm_nouveau_gem_info {
98 uint32_t handle;
99 uint32_t domain;
100 uint64_t size;
101 uint64_t offset;
102 uint64_t map_handle;
103 uint32_t tile_mode;
104 uint32_t tile_flags;
105};
106
107struct drm_nouveau_gem_new {
108 struct drm_nouveau_gem_info info;
109 uint32_t channel_hint;
110 uint32_t align;
111};
112
113#define NOUVEAU_GEM_MAX_BUFFERS 1024
114struct drm_nouveau_gem_pushbuf_bo_presumed {
115 uint32_t valid;
116 uint32_t domain;
117 uint64_t offset;
118};
119
120struct drm_nouveau_gem_pushbuf_bo {
121 uint64_t user_priv;
122 uint32_t handle;
123 uint32_t read_domains;
124 uint32_t write_domains;
125 uint32_t valid_domains;
126 struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
127};
128
129#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
130#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
131#define NOUVEAU_GEM_RELOC_OR (1 << 2)
132#define NOUVEAU_GEM_MAX_RELOCS 1024
133struct drm_nouveau_gem_pushbuf_reloc {
134 uint32_t reloc_bo_index;
135 uint32_t reloc_bo_offset;
136 uint32_t bo_index;
137 uint32_t flags;
138 uint32_t data;
139 uint32_t vor;
140 uint32_t tor;
141};
142
143#define NOUVEAU_GEM_MAX_PUSH 512
144struct drm_nouveau_gem_pushbuf_push {
145 uint32_t bo_index;
146 uint32_t pad;
147 uint64_t offset;
148 uint64_t length;
149};
150
151struct drm_nouveau_gem_pushbuf {
152 uint32_t channel;
153 uint32_t nr_buffers;
154 uint64_t buffers;
155 uint32_t nr_relocs;
156 uint32_t nr_push;
157 uint64_t relocs;
158 uint64_t push;
159 uint32_t suffix0;
160 uint32_t suffix1;
161 uint64_t vram_available;
162 uint64_t gart_available;
163};
164
165#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
166#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
167#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
168struct drm_nouveau_gem_cpu_prep {
169 uint32_t handle;
170 uint32_t flags;
171};
172
173struct drm_nouveau_gem_cpu_fini {
174 uint32_t handle;
175};
176
177enum nouveau_bus_type {
178 NV_AGP = 0,
179 NV_PCI = 1,
180 NV_PCIE = 2,
181};
182
183struct drm_nouveau_sarea {
184};
185
186#define DRM_NOUVEAU_GETPARAM 0x00
187#define DRM_NOUVEAU_SETPARAM 0x01
188#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
189#define DRM_NOUVEAU_CHANNEL_FREE 0x03
190#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
191#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
192#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
193#define DRM_NOUVEAU_GEM_NEW 0x40
194#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
195#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
196#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
197#define DRM_NOUVEAU_GEM_INFO 0x44
198
199#endif /* __NOUVEAU_DRM_H__ */
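This header only defines the command offsets; the full ioctl request numbers are normally composed by the driver and by libdrm. A minimal user-space sketch of the GETPARAM interface under that assumption (nouveau_chipset_id and the _SKETCH macro are hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "nouveau_drm.h"

#define DRM_IOCTL_NOUVEAU_GETPARAM_SKETCH \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)

static int nouveau_chipset_id(int fd, uint64_t *chipset)
{
	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
	int ret = ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM_SKETCH, &gp);

	if (ret == 0)
		*chipset = gp.value;
	return ret;
}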
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 3b9932ab1756..81e614bf2dc3 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <linux/types.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
@@ -808,6 +808,7 @@ struct drm_radeon_gem_create {
808#define RADEON_TILING_SWAP_32BIT 0x8 808#define RADEON_TILING_SWAP_32BIT 0x8
809#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface 809#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
810 * when mapped - i.e. front buffer */ 810 * when mapped - i.e. front buffer */
811#define RADEON_TILING_MICRO_SQUARE 0x20
811 812
812struct drm_radeon_gem_set_tiling { 813struct drm_radeon_gem_set_tiling {
813 uint32_t handle; 814 uint32_t handle;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 491146170522..81eb9f45883c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,29 @@ struct ttm_bo_device;
44 44
45struct drm_mm_node; 45struct drm_mm_node;
46 46
47
48/**
49 * struct ttm_placement
50 *
51 * @fpfn: first valid page frame number to put the object
52 * @lpfn: last valid page frame number to put the object
 53 * @num_placement: number of preferred placements
 54 * @placement: preferred placements
 55 * @num_busy_placement: number of preferred placements when the buffer must be evicted
 56 * @busy_placement: preferred placements when the buffer must be evicted
57 *
58 * Structure indicating the placement you request for an object.
59 */
60struct ttm_placement {
61 unsigned fpfn;
62 unsigned lpfn;
63 unsigned num_placement;
64 const uint32_t *placement;
65 unsigned num_busy_placement;
66 const uint32_t *busy_placement;
67};
68
69
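A driver-side sketch of how such a placement is typically declared (assumptions: the TTM_PL_FLAG_* placement flags come from ttm/ttm_placement.h, which is not part of this diff, and the demo_* names are made up):

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

/* Prefer cached VRAM; under memory pressure fall back to system pages. */
static const uint32_t demo_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
};

static const struct ttm_placement demo_placement = {
	.fpfn			= 0,	/* no lower page-frame bound */
	.lpfn			= 0,	/* no upper page-frame bound */
	.placement		= demo_flags,
	.num_placement		= 2,
	.busy_placement		= &demo_flags[1],	/* evict straight to system */
	.num_busy_placement	= 1,
};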
47/** 70/**
48 * struct ttm_mem_reg 71 * struct ttm_mem_reg
49 * 72 *
@@ -109,10 +132,6 @@ struct ttm_tt;
109 * the object is destroyed. 132 * the object is destroyed.
110 * @event_queue: Queue for processes waiting on buffer object status change. 133 * @event_queue: Queue for processes waiting on buffer object status change.
111 * @lock: spinlock protecting mostly synchronization members. 134 * @lock: spinlock protecting mostly synchronization members.
112 * @proposed_placement: Proposed placement for the buffer. Changed only by the
113 * creator prior to validation as opposed to bo->mem.proposed_flags which is
114 * changed by the implementation prior to a buffer move if it wants to outsmart
115 * the buffer creator / user. This latter happens, for example, at eviction.
116 * @mem: structure describing current placement. 135 * @mem: structure describing current placement.
117 * @persistant_swap_storage: Usually the swap storage is deleted for buffers 136 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
118 * pinned in physical memory. If this behaviour is not desired, this member 137 * pinned in physical memory. If this behaviour is not desired, this member
@@ -177,7 +196,6 @@ struct ttm_buffer_object {
177 * Members protected by the bo::reserved lock. 196 * Members protected by the bo::reserved lock.
178 */ 197 */
179 198
180 uint32_t proposed_placement;
181 struct ttm_mem_reg mem; 199 struct ttm_mem_reg mem;
182 struct file *persistant_swap_storage; 200 struct file *persistant_swap_storage;
183 struct ttm_tt *ttm; 201 struct ttm_tt *ttm;
@@ -285,29 +303,30 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
285 * Note: It might be necessary to block validations before the 303 * Note: It might be necessary to block validations before the
286 * wait by reserving the buffer. 304 * wait by reserving the buffer.
287 * Returns -EBUSY if no_wait is true and the buffer is busy. 305 * Returns -EBUSY if no_wait is true and the buffer is busy.
288 * Returns -ERESTART if interrupted by a signal. 306 * Returns -ERESTARTSYS if interrupted by a signal.
289 */ 307 */
290extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, 308extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
291 bool interruptible, bool no_wait); 309 bool interruptible, bool no_wait);
292/** 310/**
293 * ttm_buffer_object_validate 311 * ttm_bo_validate
294 * 312 *
295 * @bo: The buffer object. 313 * @bo: The buffer object.
296 * @proposed_placement: Proposed_placement for the buffer object. 314 * @placement: Proposed placement for the buffer object.
297 * @interruptible: Sleep interruptible if sleeping. 315 * @interruptible: Sleep interruptible if sleeping.
298 * @no_wait: Return immediately if the buffer is busy. 316 * @no_wait: Return immediately if the buffer is busy.
299 * 317 *
300 * Changes placement and caching policy of the buffer object 318 * Changes placement and caching policy of the buffer object
 301 * according to bo::proposed_flags. 319 * according to the proposed placement.
302 * Returns 320 * Returns
303 * -EINVAL on invalid proposed_flags. 321 * -EINVAL on invalid proposed placement.
304 * -ENOMEM on out-of-memory condition. 322 * -ENOMEM on out-of-memory condition.
305 * -EBUSY if no_wait is true and buffer busy. 323 * -EBUSY if no_wait is true and buffer busy.
306 * -ERESTART if interrupted by a signal. 324 * -ERESTARTSYS if interrupted by a signal.
307 */ 325 */
308extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 326extern int ttm_bo_validate(struct ttm_buffer_object *bo,
309 uint32_t proposed_placement, 327 struct ttm_placement *placement,
310 bool interruptible, bool no_wait); 328 bool interruptible, bool no_wait);
329
311/** 330/**
312 * ttm_bo_unref 331 * ttm_bo_unref
313 * 332 *
@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
328 * waiting for buffer idle. This lock is recursive. 347 * waiting for buffer idle. This lock is recursive.
329 * Returns 348 * Returns
330 * -EBUSY if the buffer is busy and no_wait is true. 349 * -EBUSY if the buffer is busy and no_wait is true.
331 * -ERESTART if interrupted by a signal. 350 * -ERESTARTSYS if interrupted by a signal.
332 */ 351 */
333 352
334extern int 353extern int
@@ -343,7 +362,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
343extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); 362extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
344 363
345/** 364/**
346 * ttm_buffer_object_init 365 * ttm_bo_init
347 * 366 *
348 * @bdev: Pointer to a ttm_bo_device struct. 367 * @bdev: Pointer to a ttm_bo_device struct.
349 * @bo: Pointer to a ttm_buffer_object to be initialized. 368 * @bo: Pointer to a ttm_buffer_object to be initialized.
@@ -371,20 +390,20 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
371 * Returns 390 * Returns
372 * -ENOMEM: Out of memory. 391 * -ENOMEM: Out of memory.
373 * -EINVAL: Invalid placement flags. 392 * -EINVAL: Invalid placement flags.
374 * -ERESTART: Interrupted by signal while sleeping waiting for resources. 393 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
375 */ 394 */
376 395
377extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, 396extern int ttm_bo_init(struct ttm_bo_device *bdev,
378 struct ttm_buffer_object *bo, 397 struct ttm_buffer_object *bo,
379 unsigned long size, 398 unsigned long size,
380 enum ttm_bo_type type, 399 enum ttm_bo_type type,
381 uint32_t flags, 400 struct ttm_placement *placement,
382 uint32_t page_alignment, 401 uint32_t page_alignment,
383 unsigned long buffer_start, 402 unsigned long buffer_start,
384 bool interrubtible, 403 bool interrubtible,
385 struct file *persistant_swap_storage, 404 struct file *persistant_swap_storage,
386 size_t acc_size, 405 size_t acc_size,
387 void (*destroy) (struct ttm_buffer_object *)); 406 void (*destroy) (struct ttm_buffer_object *));
388/** 407/**
389 * ttm_bo_synccpu_object_init 408 * ttm_bo_synccpu_object_init
390 * 409 *
@@ -405,47 +424,43 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
405 * GEM user interface. 424 * GEM user interface.
406 * @p_bo: On successful completion *p_bo points to the created object. 425 * @p_bo: On successful completion *p_bo points to the created object.
407 * 426 *
408 * This function allocates a ttm_buffer_object, and then calls 427 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
409 * ttm_buffer_object_init on that object. 428 * on that object. The destroy function is set to kfree().
410 * The destroy function is set to kfree().
411 * Returns 429 * Returns
412 * -ENOMEM: Out of memory. 430 * -ENOMEM: Out of memory.
413 * -EINVAL: Invalid placement flags. 431 * -EINVAL: Invalid placement flags.
414 * -ERESTART: Interrupted by signal while waiting for resources. 432 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
415 */ 433 */
416 434
417extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, 435extern int ttm_bo_create(struct ttm_bo_device *bdev,
418 unsigned long size, 436 unsigned long size,
419 enum ttm_bo_type type, 437 enum ttm_bo_type type,
420 uint32_t flags, 438 struct ttm_placement *placement,
421 uint32_t page_alignment, 439 uint32_t page_alignment,
422 unsigned long buffer_start, 440 unsigned long buffer_start,
423 bool interruptible, 441 bool interruptible,
424 struct file *persistant_swap_storage, 442 struct file *persistant_swap_storage,
425 struct ttm_buffer_object **p_bo); 443 struct ttm_buffer_object **p_bo);
426 444
427/** 445/**
428 * ttm_bo_check_placement 446 * ttm_bo_check_placement
429 * 447 *
430 * @bo: the buffer object. 448 * @bo: the buffer object.
431 * @set_flags: placement flags to set. 449 * @placement: placements
432 * @clr_flags: placement flags to clear.
433 * 450 *
434 * Performs minimal validity checking on an intended change of 451 * Performs minimal validity checking on an intended change of
435 * placement flags. 452 * placement flags.
436 * Returns 453 * Returns
437 * -EINVAL: Intended change is invalid or not allowed. 454 * -EINVAL: Intended change is invalid or not allowed.
438 */ 455 */
439
440extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, 456extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
441 uint32_t set_flags, uint32_t clr_flags); 457 struct ttm_placement *placement);
442 458
443/** 459/**
444 * ttm_bo_init_mm 460 * ttm_bo_init_mm
445 * 461 *
446 * @bdev: Pointer to a ttm_bo_device struct. 462 * @bdev: Pointer to a ttm_bo_device struct.
447 * @mem_type: The memory type. 463 * @mem_type: The memory type.
448 * @p_offset: offset for managed area in pages.
449 * @p_size: size managed area in pages. 464 * @p_size: size managed area in pages.
450 * 465 *
451 * Initialize a manager for a given memory type. 466 * Initialize a manager for a given memory type.
@@ -458,7 +473,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
458 */ 473 */
459 474
460extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 475extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
461 unsigned long p_offset, unsigned long p_size); 476 unsigned long p_size);
462/** 477/**
463 * ttm_bo_clean_mm 478 * ttm_bo_clean_mm
464 * 479 *
@@ -503,7 +518,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
503 * 518 *
504 * Returns: 519 * Returns:
505 * -EINVAL: Invalid or uninitialized memory type. 520 * -EINVAL: Invalid or uninitialized memory type.
506 * -ERESTART: The call was interrupted by a signal while waiting to 521 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
507 * evict a buffer. 522 * evict a buffer.
508 */ 523 */
509 524
@@ -606,7 +621,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
606 * be called from the fops::read and fops::write method. 621 * be called from the fops::read and fops::write method.
607 * Returns: 622 * Returns:
608 * See man (2) write, man(2) read. In particular, 623 * See man (2) write, man(2) read. In particular,
609 * the function may return -EINTR if 624 * the function may return -ERESTARTSYS if
610 * interrupted by a signal. 625 * interrupted by a signal.
611 */ 626 */
612 627
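Tying the renamed entry points together, here is a hedged driver-side sketch of buffer creation with the new placement-based signatures (ttm_bo_type_device is assumed from enum ttm_bo_type, TTM_PL_FLAG_* from ttm/ttm_placement.h; the demo_* names are hypothetical):

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int demo_create_vram_bo(struct ttm_bo_device *bdev, unsigned long size,
			       struct ttm_buffer_object **p_bo)
{
	static const uint32_t flags[] = { TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED };
	struct ttm_placement placement = {
		.placement		= flags,
		.num_placement		= 1,
		.busy_placement		= flags,
		.num_busy_placement	= 1,
	};

	/* page_alignment 0: no alignment beyond page granularity.
	 * Later moves would go through ttm_bo_validate() on a reserved buffer. */
	return ttm_bo_create(bdev, size, ttm_bo_type_device, &placement,
			     0, 0, true, NULL, p_bo);
}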
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e8cd6d20aed2..6b9db917e717 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -115,7 +115,6 @@ struct ttm_backend {
115 struct ttm_backend_func *func; 115 struct ttm_backend_func *func;
116}; 116};
117 117
118#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
119#define TTM_PAGE_FLAG_USER (1 << 1) 118#define TTM_PAGE_FLAG_USER (1 << 1)
120#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) 119#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
121#define TTM_PAGE_FLAG_WRITE (1 << 3) 120#define TTM_PAGE_FLAG_WRITE (1 << 3)
@@ -242,12 +241,6 @@ struct ttm_mem_type_manager {
242/** 241/**
243 * struct ttm_bo_driver 242 * struct ttm_bo_driver
244 * 243 *
245 * @mem_type_prio: Priority array of memory types to place a buffer object in
246 * if it fits without evicting buffers from any of these memory types.
247 * @mem_busy_prio: Priority array of memory types to place a buffer object in
248 * if it needs to evict buffers to make room.
249 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
250 * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
251 * @create_ttm_backend_entry: Callback to create a struct ttm_backend. 244 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
252 * @invalidate_caches: Callback to invalidate read caches when a buffer object 245 * @invalidate_caches: Callback to invalidate read caches when a buffer object
253 * has been evicted. 246 * has been evicted.
@@ -265,11 +258,6 @@ struct ttm_mem_type_manager {
265 */ 258 */
266 259
267struct ttm_bo_driver { 260struct ttm_bo_driver {
268 const uint32_t *mem_type_prio;
269 const uint32_t *mem_busy_prio;
270 uint32_t num_mem_type_prio;
271 uint32_t num_mem_busy_prio;
272
273 /** 261 /**
274 * struct ttm_bo_driver member create_ttm_backend_entry 262 * struct ttm_bo_driver member create_ttm_backend_entry
275 * 263 *
@@ -306,7 +294,8 @@ struct ttm_bo_driver {
306 * finished, they'll end up in bo->mem.flags 294 * finished, they'll end up in bo->mem.flags
307 */ 295 */
308 296
309 uint32_t(*evict_flags) (struct ttm_buffer_object *bo); 297 void(*evict_flags) (struct ttm_buffer_object *bo,
298 struct ttm_placement *placement);
310 /** 299 /**
311 * struct ttm_bo_driver member move: 300 * struct ttm_bo_driver member move:
312 * 301 *
@@ -363,6 +352,11 @@ struct ttm_bo_driver {
363 /* notify the driver we are taking a fault on this BO 352 /* notify the driver we are taking a fault on this BO
364 * and have reserved it */ 353 * and have reserved it */
365 void (*fault_reserve_notify)(struct ttm_buffer_object *bo); 354 void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
355
356 /**
357 * notify the driver that we're about to swap out this bo
358 */
359 void (*swap_notify) (struct ttm_buffer_object *bo);
366}; 360};
367 361
368/** 362/**
@@ -545,6 +539,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
545extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); 539extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
546 540
547/** 541/**
542 * ttm_tt_populate:
543 *
544 * @ttm: The struct ttm_tt to contain the backing pages.
545 *
546 * Add backing pages to all of @ttm
547 */
548extern int ttm_tt_populate(struct ttm_tt *ttm);
549
550/**
548 * ttm_ttm_destroy: 551 * ttm_ttm_destroy:
549 * 552 *
550 * @ttm: The struct ttm_tt. 553 * @ttm: The struct ttm_tt.
@@ -639,12 +642,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
639 * -EBUSY: No space available (only if no_wait == 1). 642 * -EBUSY: No space available (only if no_wait == 1).
640 * -ENOMEM: Could not allocate memory for the buffer object, either due to 643 * -ENOMEM: Could not allocate memory for the buffer object, either due to
641 * fragmentation or concurrent allocators. 644 * fragmentation or concurrent allocators.
642 * -ERESTART: An interruptible sleep was interrupted by a signal. 645 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
643 */ 646 */
644extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, 647extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
645 uint32_t proposed_placement, 648 struct ttm_placement *placement,
646 struct ttm_mem_reg *mem, 649 struct ttm_mem_reg *mem,
647 bool interruptible, bool no_wait); 650 bool interruptible, bool no_wait);
648/** 651/**
649 * ttm_bo_wait_for_cpu 652 * ttm_bo_wait_for_cpu
650 * 653 *
@@ -654,7 +657,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
654 * Wait until a buffer object is no longer sync'ed for CPU access. 657 * Wait until a buffer object is no longer sync'ed for CPU access.
655 * Returns: 658 * Returns:
656 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). 659 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
657 * -ERESTART: An interruptible sleep was interrupted by a signal. 660 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
658 */ 661 */
659 662
660extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); 663extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
@@ -758,7 +761,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
758 * -EAGAIN: The reservation may cause a deadlock. 761 * -EAGAIN: The reservation may cause a deadlock.
759 * Release all buffer reservations, wait for @bo to become unreserved and 762 * Release all buffer reservations, wait for @bo to become unreserved and
760 * try again. (only if use_sequence == 1). 763 * try again. (only if use_sequence == 1).
761 * -ERESTART: A wait for the buffer to become unreserved was interrupted by 764 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
762 * a signal. Release all buffer reservations and return to user-space. 765 * a signal. Release all buffer reservations and return to user-space.
763 */ 766 */
764extern int ttm_bo_reserve(struct ttm_buffer_object *bo, 767extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
@@ -786,34 +789,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
786extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, 789extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
787 bool interruptible); 790 bool interruptible);
788 791
789/**
790 * ttm_bo_block_reservation
791 *
792 * @bo: A pointer to a struct ttm_buffer_object.
793 * @interruptible: Use interruptible sleep when waiting.
794 * @no_wait: Don't sleep, but rather return -EBUSY.
795 *
796 * Block reservation for validation by simply reserving the buffer.
797 * This is intended for single buffer use only without eviction,
798 * and thus needs no deadlock protection.
799 *
800 * Returns:
801 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
802 * -ERESTART: If interruptible == 1 and the process received a signal
803 * while sleeping.
804 */
805extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
806 bool interruptible, bool no_wait);
807
808/**
809 * ttm_bo_unblock_reservation
810 *
811 * @bo: A pointer to a struct ttm_buffer_object.
812 *
813 * Unblocks reservation leaving lru lists untouched.
814 */
815extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
816
817/* 792/*
818 * ttm_bo_util.c 793 * ttm_bo_util.c
819 */ 794 */
@@ -904,7 +879,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
904 * Utility function that returns the pgprot_t that should be used for 879 * Utility function that returns the pgprot_t that should be used for
905 * setting up a PTE with the caching model indicated by @c_state. 880 * setting up a PTE with the caching model indicated by @c_state.
906 */ 881 */
907extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp); 882extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
908 883
909#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 884#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
910#define TTM_HAS_AGP 885#define TTM_HAS_AGP
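The evict_flags() change above turns the hook from a flag-returning function into one that fills in a placement. A sketch of what a driver implementation might now look like (demo_* names are hypothetical; TTM_PL_FLAG_* assumed from ttm/ttm_placement.h):

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static const uint32_t demo_evict_domain[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
};

/* Send every evicted buffer to cached system memory. */
static void demo_evict_flags(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement)
{
	placement->fpfn = 0;
	placement->lpfn = 0;
	placement->placement = demo_evict_domain;
	placement->num_placement = 1;
	placement->busy_placement = demo_evict_domain;
	placement->num_busy_placement = 1;
}

/* wired into the driver's struct ttm_bo_driver as .evict_flags = demo_evict_flags */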
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
new file mode 100644
index 000000000000..cd2c475da9ea
--- /dev/null
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -0,0 +1,107 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_EXECBUF_UTIL_H_
32#define _TTM_EXECBUF_UTIL_H_
33
34#include "ttm/ttm_bo_api.h"
35#include <linux/list.h>
36
37/**
38 * struct ttm_validate_buffer
39 *
40 * @head: list head for thread-private list.
41 * @bo: refcounted buffer object pointer.
42 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
43 * adding a new sync object.
 44 * @reserved: Indicates whether @bo has been reserved for validation.
45 */
46
47struct ttm_validate_buffer {
48 struct list_head head;
49 struct ttm_buffer_object *bo;
50 void *new_sync_obj_arg;
51 bool reserved;
52};
53
54/**
55 * function ttm_eu_backoff_reservation
56 *
57 * @list: thread private list of ttm_validate_buffer structs.
58 *
59 * Undoes all buffer validation reservations for bos pointed to by
60 * the list entries.
61 */
62
63extern void ttm_eu_backoff_reservation(struct list_head *list);
64
65/**
66 * function ttm_eu_reserve_buffers
67 *
68 * @list: thread private list of ttm_validate_buffer structs.
69 * @val_seq: A unique sequence number.
70 *
71 * Tries to reserve bos pointed to by the list entries for validation.
72 * If the function returns 0, all buffers are marked as "unfenced",
73 * taken off the lru lists and are not synced for write CPU usage.
74 *
75 * If the function detects a deadlock due to multiple threads trying to
76 * reserve the same buffers in reverse order, all threads except one will
77 * back off and retry. This function may sleep while waiting for
78 * CPU write reservations to be cleared, and for other threads to
79 * unreserve their buffers.
80 *
 81 * This function may return -ERESTARTSYS or -EAGAIN if the calling process
82 * receives a signal while waiting. In that case, no buffers on the list
83 * will be reserved upon return.
84 *
85 * Buffers reserved by this function should be unreserved by
86 * a call to either ttm_eu_backoff_reservation() or
87 * ttm_eu_fence_buffer_objects() when command submission is complete or
88 * has failed.
89 */
90
91extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
92
93/**
94 * function ttm_eu_fence_buffer_objects.
95 *
96 * @list: thread private list of ttm_validate_buffer structs.
97 * @sync_obj: The new sync object for the buffers.
98 *
99 * This function should be called when command submission is complete, and
100 * it will add a new sync object to bos pointed to by entries on @list.
101 * It also unreserves all buffers, putting them on lru lists.
102 *
103 */
104
105extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
106
107#endif
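Putting the three helpers together, a typical submission path would look roughly like this (sketch only; demo_emit_commands stands in for the driver's own validation and command-building step, and the fence object type is driver specific):

#include <linux/list.h>
#include "ttm/ttm_execbuf_util.h"

int demo_emit_commands(struct list_head *list);	/* hypothetical driver hook, defined elsewhere */

static int demo_submit(struct list_head *list, uint32_t val_seq, void *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(list, val_seq);
	if (ret)
		return ret;	/* nothing on @list is left reserved */

	ret = demo_emit_commands(list);
	if (ret) {
		ttm_eu_backoff_reservation(list);
		return ret;
	}

	ttm_eu_fence_buffer_objects(list, fence);	/* also unreserves the buffers */
	return 0;
}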
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
new file mode 100644
index 000000000000..81ba0b0b891a
--- /dev/null
+++ b/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,247 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31/** @file ttm_lock.h
32 * This file implements a simple replacement for the buffer manager use
33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
35 * is relatively fast, and intended for in-kernel use only.
36 *
37 * The vt mode is used only when there is a need to block all
38 * user-space processes from validating buffers.
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
42 * excludes write lock and read lock.
43 *
44 * The suspend mode is used to lock out all TTM users when preparing for
45 * and executing suspend operations.
46 *
47 */
48
49#ifndef _TTM_LOCK_H_
50#define _TTM_LOCK_H_
51
52#include "ttm/ttm_object.h"
53#include <linux/wait.h>
54#include <asm/atomic.h>
55
56/**
57 * struct ttm_lock
58 *
59 * @base: ttm base object used solely to release the lock if the client
60 * holding the lock dies.
61 * @queue: Queue for processes waiting for lock change-of-status.
62 * @lock: Spinlock protecting some lock members.
63 * @rw: Read-write lock counter. Protected by @lock.
64 * @flags: Lock state. Protected by @lock.
65 * @kill_takers: Boolean whether to kill takers of the lock.
66 * @signal: Signal to send when kill_takers is true.
67 */
68
69struct ttm_lock {
70 struct ttm_base_object base;
71 wait_queue_head_t queue;
72 spinlock_t lock;
73 int32_t rw;
74 uint32_t flags;
75 bool kill_takers;
76 int signal;
77 struct ttm_object_file *vt_holder;
78};
79
80
81/**
82 * ttm_lock_init
83 *
84 * @lock: Pointer to a struct ttm_lock
85 * Initializes the lock.
86 */
87extern void ttm_lock_init(struct ttm_lock *lock);
88
89/**
90 * ttm_read_unlock
91 *
92 * @lock: Pointer to a struct ttm_lock
93 *
94 * Releases a read lock.
95 */
96extern void ttm_read_unlock(struct ttm_lock *lock);
97
98/**
99 * ttm_read_lock
100 *
101 * @lock: Pointer to a struct ttm_lock
102 * @interruptible: Interruptible sleeping while waiting for a lock.
103 *
104 * Takes the lock in read mode.
105 * Returns:
106 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
107 */
108extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
109
110/**
111 * ttm_read_trylock
112 *
113 * @lock: Pointer to a struct ttm_lock
114 * @interruptible: Interruptible sleeping while waiting for a lock.
115 *
116 * Tries to take the lock in read mode. If the lock is already held
117 * in write mode, the function will return -EBUSY. If the lock is held
118 * in vt or suspend mode, the function will sleep until these modes
119 * are unlocked.
120 *
121 * Returns:
122 * -EBUSY The lock was already held in write mode.
123 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
124 */
125extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
126
127/**
128 * ttm_write_unlock
129 *
130 * @lock: Pointer to a struct ttm_lock
131 *
132 * Releases a write lock.
133 */
134extern void ttm_write_unlock(struct ttm_lock *lock);
135
136/**
137 * ttm_write_lock
138 *
139 * @lock: Pointer to a struct ttm_lock
140 * @interruptible: Interruptible sleeping while waiting for a lock.
141 *
142 * Takes the lock in write mode.
143 * Returns:
144 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
145 */
146extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
147
148/**
149 * ttm_lock_downgrade
150 *
151 * @lock: Pointer to a struct ttm_lock
152 *
153 * Downgrades a write lock to a read lock.
154 */
155extern void ttm_lock_downgrade(struct ttm_lock *lock);
156
157/**
158 * ttm_suspend_lock
159 *
160 * @lock: Pointer to a struct ttm_lock
161 *
162 * Takes the lock in suspend mode. Excludes read and write mode.
163 */
164extern void ttm_suspend_lock(struct ttm_lock *lock);
165
166/**
167 * ttm_suspend_unlock
168 *
169 * @lock: Pointer to a struct ttm_lock
170 *
171 * Releases a suspend lock
172 */
173extern void ttm_suspend_unlock(struct ttm_lock *lock);
174
175/**
176 * ttm_vt_lock
177 *
178 * @lock: Pointer to a struct ttm_lock
179 * @interruptible: Interruptible sleeping while waiting for a lock.
180 * @tfile: Pointer to a struct ttm_object_file to register the lock with.
181 *
182 * Takes the lock in vt mode.
183 * Returns:
184 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
185 * -ENOMEM: Out of memory when locking.
186 */
187extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
188 struct ttm_object_file *tfile);
189
190/**
191 * ttm_vt_unlock
192 *
193 * @lock: Pointer to a struct ttm_lock
194 *
195 * Releases a vt lock.
196 * Returns:
197 * -EINVAL If the lock was not held.
198 */
199extern int ttm_vt_unlock(struct ttm_lock *lock);
200
201/**
202 * ttm_write_unlock
203 *
204 * @lock: Pointer to a struct ttm_lock
205 *
206 * Releases a write lock.
207 */
208extern void ttm_write_unlock(struct ttm_lock *lock);
209
210/**
211 * ttm_write_lock
212 *
213 * @lock: Pointer to a struct ttm_lock
214 * @interruptible: Interruptible sleeping while waiting for a lock.
215 *
216 * Takes the lock in write mode.
217 * Returns:
218 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
219 */
220extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
221
222/**
223 * ttm_lock_set_kill
224 *
225 * @lock: Pointer to a struct ttm_lock
226 * @val: Boolean whether to kill processes taking the lock.
227 * @signal: Signal to send to the process taking the lock.
228 *
229 * The kill-when-taking-lock functionality is used to kill processes that keep
 230 * on using the TTM functionality when its resources have been taken down, for
231 * example when the X server exits. A typical sequence would look like this:
232 * - X server takes lock in write mode.
233 * - ttm_lock_set_kill() is called with @val set to true.
234 * - As part of X server exit, TTM resources are taken down.
235 * - X server releases the lock on file release.
236 * - Another dri client wants to render, takes the lock and is killed.
237 *
238 */
239static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
240 int signal)
241{
242 lock->kill_takers = val;
243 if (val)
244 lock->signal = signal;
245}
246
247#endif
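The intended usage pattern is for buffer-validation ioctls to take the lock in read mode while the master (for example the X server) uses write or vt mode to fence everyone out. A minimal sketch, assuming a hypothetical demo_do_validation() driver step:

#include "ttm/ttm_lock.h"

int demo_do_validation(void);	/* hypothetical driver work, defined elsewhere */

static int demo_validate_ioctl(struct ttm_lock *lock)
{
	int ret;

	ret = ttm_read_lock(lock, true);
	if (ret)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */

	ret = demo_do_validation();

	ttm_read_unlock(lock);
	return ret;
}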
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 6983a7cf4da4..b199170b3c2c 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -33,6 +33,7 @@
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/kobject.h> 35#include <linux/kobject.h>
36#include <linux/mm.h>
36 37
37/** 38/**
38 * struct ttm_mem_shrink - callback to shrink TTM memory usage. 39 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
new file mode 100644
index 000000000000..0d9db099978b
--- /dev/null
+++ b/include/drm/ttm/ttm_object.h
@@ -0,0 +1,271 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/** @file ttm_object.h
31 *
32 * Base- and reference object implementation for the various
33 * ttm objects. Implements reference counting, minimal security checks
34 * and release on file close.
35 */
36
37#ifndef _TTM_OBJECT_H_
38#define _TTM_OBJECT_H_
39
40#include <linux/list.h>
41#include "drm_hashtab.h"
42#include <linux/kref.h>
43#include <ttm/ttm_memory.h>
44
45/**
46 * enum ttm_ref_type
47 *
48 * Describes what type of reference a ref object holds.
49 *
50 * TTM_REF_USAGE is a simple refcount on a base object.
51 *
52 * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
53 * buffer object.
54 *
55 * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
56 * buffer object.
57 *
58 */
59
60enum ttm_ref_type {
61 TTM_REF_USAGE,
62 TTM_REF_SYNCCPU_READ,
63 TTM_REF_SYNCCPU_WRITE,
64 TTM_REF_NUM
65};
66
67/**
68 * enum ttm_object_type
69 *
70 * One entry per ttm object type.
71 * Device-specific types should use the
 72 * ttm_driver_type<n> entries.
73 */
74
75enum ttm_object_type {
76 ttm_fence_type,
77 ttm_buffer_type,
78 ttm_lock_type,
79 ttm_driver_type0 = 256,
80 ttm_driver_type1,
81 ttm_driver_type2,
82 ttm_driver_type3,
83 ttm_driver_type4,
84 ttm_driver_type5
85};
86
87struct ttm_object_file;
88struct ttm_object_device;
89
90/**
91 * struct ttm_base_object
92 *
93 * @hash: hash entry for the per-device object hash.
94 * @type: derived type this object is base class for.
95 * @shareable: Other ttm_object_files can access this object.
96 *
97 * @tfile: Pointer to ttm_object_file of the creator.
98 * NULL if the object was not created by a user request.
99 * (kernel object).
100 *
101 * @refcount: Number of references to this object, not
102 * including the hash entry. A reference to a base object can
103 * only be held by a ref object.
104 *
105 * @refcount_release: A function to be called when there are
106 * no more references to this object. This function should
107 * destroy the object (or make sure destruction eventually happens),
108 * and when it is called, the object has
109 * already been taken out of the per-device hash. The parameter
110 * "base" should be set to NULL by the function.
111 *
112 * @ref_obj_release: A function to be called when a reference object
113 * with another ttm_ref_type than TTM_REF_USAGE is deleted.
114 * this function may, for example, release a lock held by a user-space
115 * process.
116 *
117 * This struct is intended to be used as a base struct for objects that
118 * are visible to user-space. It provides a global name, race-safe
 119 * access and refcounting, minimal access control and hooks for unref actions.
120 */
121
122struct ttm_base_object {
123 struct drm_hash_item hash;
124 enum ttm_object_type object_type;
125 bool shareable;
126 struct ttm_object_file *tfile;
127 struct kref refcount;
128 void (*refcount_release) (struct ttm_base_object **base);
129 void (*ref_obj_release) (struct ttm_base_object *base,
130 enum ttm_ref_type ref_type);
131};
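
A minimal, hypothetical sketch of a driver object embedding struct ttm_base_object, with a @refcount_release hook that clears the caller's pointer and frees the container, as the description above requires; example_driver_object and example_refcount_release are illustrative names.

#include <linux/kernel.h>
#include <linux/slab.h>

struct example_driver_object {
	struct ttm_base_object base;
	/* driver-private state would follow here */
};

static void example_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct example_driver_object *obj =
		container_of(base, struct example_driver_object, base);

	*p_base = NULL;		/* required by the @refcount_release contract */
	kfree(obj);
}
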
132
133/**
134 * ttm_base_object_init
135 *
136 * @tfile: Pointer to a struct ttm_object_file.
137 * @base: The struct ttm_base_object to initialize.
 138 * @shareable: This object is shareable with other applications.
139 * (different @tfile pointers.)
140 * @type: The object type.
141 * @refcount_release: See the struct ttm_base_object description.
142 * @ref_obj_release: See the struct ttm_base_object description.
143 *
144 * Initializes a struct ttm_base_object.
145 */
146
147extern int ttm_base_object_init(struct ttm_object_file *tfile,
148 struct ttm_base_object *base,
149 bool shareable,
150 enum ttm_object_type type,
151 void (*refcount_release) (struct ttm_base_object
152 **),
153 void (*ref_obj_release) (struct ttm_base_object
154 *,
155 enum ttm_ref_type
156 ref_type));
157
158/**
159 * ttm_base_object_lookup
160 *
161 * @tfile: Pointer to a struct ttm_object_file.
162 * @key: Hash key
163 *
164 * Looks up a struct ttm_base_object with the key @key.
165 * Also verifies that the object is visible to the application, by
166 * comparing the @tfile argument and checking the object shareable flag.
167 */
168
169extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
170 *tfile, uint32_t key);
171
172/**
173 * ttm_base_object_unref
174 *
 175 * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
176 *
177 * Decrements the base object refcount and clears the pointer pointed to by
178 * p_base.
179 */
180
181extern void ttm_base_object_unref(struct ttm_base_object **p_base);
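
A sketch, under the assumption of an ioctl handler resolving a user-supplied key, of the usual lookup/unref pairing; example_use_handle is a hypothetical helper.

#include <linux/errno.h>

static int example_use_handle(struct ttm_object_file *tfile, uint32_t key)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, key);

	if (base == NULL)
		return -EINVAL;	/* not found or not visible to this file */

	/* ... use the driver object derived from @base ... */

	ttm_base_object_unref(&base);	/* also clears @base */
	return 0;
}
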
182
183/**
184 * ttm_ref_object_add.
185 *
186 * @tfile: A struct ttm_object_file representing the application owning the
187 * ref_object.
188 * @base: The base object to reference.
189 * @ref_type: The type of reference.
190 * @existed: Upon completion, indicates that an identical reference object
191 * already existed, and the refcount was upped on that object instead.
192 *
193 * Adding a ref object to a base object is basically like referencing the
194 * base object, but a user-space application holds the reference. When the
195 * file corresponding to @tfile is closed, all its reference objects are
196 * deleted. A reference object can have different types depending on what
 197 * it's intended for. It can be used for refcounting to prevent object destruction.
198 * When user-space takes a lock, it can add a ref object to that lock to
199 * make sure the lock is released if the application dies. A ref object
200 * will hold a single reference on a base object.
201 */
202extern int ttm_ref_object_add(struct ttm_object_file *tfile,
203 struct ttm_base_object *base,
204 enum ttm_ref_type ref_type, bool *existed);
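
As a hedged sketch of the pattern described above, handing an object out to a client typically adds a TTM_REF_USAGE reference on its behalf so that the object survives until the client's file is closed; the helper name is illustrative.

static int example_reference_for_client(struct ttm_object_file *tfile,
					struct ttm_base_object *base)
{
	bool existed;

	/* Reuses an existing identical ref object if one is already there. */
	return ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
}
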
205/**
206 * ttm_ref_object_base_unref
207 *
208 * @key: Key representing the base object.
209 * @ref_type: Ref type of the ref object to be dereferenced.
210 *
211 * Unreference a ref object with type @ref_type
212 * on the base object identified by @key. If there are no duplicate
213 * references, the ref object will be destroyed and the base object
214 * will be unreferenced.
215 */
216extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
217 unsigned long key,
218 enum ttm_ref_type ref_type);
219
220/**
 221 * ttm_object_file_init - initialize a struct ttm_object_file
222 *
223 * @tdev: A struct ttm_object device this file is initialized on.
224 * @hash_order: Order of the hash table used to hold the reference objects.
225 *
226 * This is typically called by the file_ops::open function.
227 */
228
229extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
230 *tdev,
231 unsigned int hash_order);
232
233/**
234 * ttm_object_file_release - release data held by a ttm_object_file
235 *
236 * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
237 * *p_tfile will be set to NULL by this function.
238 *
 239 * Releases all data associated with a ttm_object_file.
240 * Typically called from file_ops::release. The caller must
241 * ensure that there are no concurrent users of tfile.
242 */
243
244extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
245
246/**
 247 * ttm_object_device_init - initialize a struct ttm_object_device
248 *
249 * @hash_order: Order of hash table used to hash the base objects.
250 *
251 * This function is typically called on device initialization to prepare
252 * data structures needed for ttm base and ref objects.
253 */
254
255extern struct ttm_object_device *ttm_object_device_init
256 (struct ttm_mem_global *mem_glob, unsigned int hash_order);
257
258/**
259 * ttm_object_device_release - release data held by a ttm_object_device
260 *
261 * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
262 * *p_tdev will be set to NULL by this function.
263 *
 264 * Releases all data associated with a ttm_object_device.
265 * Typically called from driver::unload before the destruction of the
266 * device private data structure.
267 */
268
269extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
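
A hypothetical lifecycle sketch tying the four calls above together: device-wide state created at driver load, per-file state created at open, and both released in the corresponding teardown paths. The hash orders (12 and 10) are arbitrary illustrative values.

#include <linux/errno.h>

static struct ttm_object_device *example_tdev;

static int example_driver_load(struct ttm_mem_global *mem_glob)
{
	example_tdev = ttm_object_device_init(mem_glob, 12);
	return example_tdev != NULL ? 0 : -ENOMEM;
}

static struct ttm_object_file *example_open(void)
{
	return ttm_object_file_init(example_tdev, 10);
}

static void example_close(struct ttm_object_file **p_tfile)
{
	ttm_object_file_release(p_tfile);	/* also clears *p_tfile */
}

static void example_driver_unload(void)
{
	ttm_object_device_release(&example_tdev);	/* also clears example_tdev */
}
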
270
271#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 170786e5c2ff..fd11a5bd892d 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -24,7 +24,7 @@
24#ifndef _VIA_DRM_H_ 24#ifndef _VIA_DRM_H_
25#define _VIA_DRM_H_ 25#define _VIA_DRM_H_
26 26
27#include <linux/types.h> 27#include "drm.h"
28 28
29/* WARNING: These defines must be the same as what the Xserver uses. 29/* WARNING: These defines must be the same as what the Xserver uses.
30 * if you change them, you must change the defines in the Xserver. 30 * if you change them, you must change the defines in the Xserver.
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
new file mode 100644
index 000000000000..c7645f480d12
--- /dev/null
+++ b/include/drm/vmwgfx_drm.h
@@ -0,0 +1,588 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__
30
31#define DRM_VMW_MAX_SURFACE_FACES 6
32#define DRM_VMW_MAX_MIP_LEVELS 24
33
34#define DRM_VMW_EXT_NAME_LEN 128
35
36#define DRM_VMW_GET_PARAM 0
37#define DRM_VMW_ALLOC_DMABUF 1
38#define DRM_VMW_UNREF_DMABUF 2
39#define DRM_VMW_CURSOR_BYPASS 3
40/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
41#define DRM_VMW_CONTROL_STREAM 4
42#define DRM_VMW_CLAIM_STREAM 5
43#define DRM_VMW_UNREF_STREAM 6
44/* guarded by DRM_VMW_PARAM_3D == 1 */
45#define DRM_VMW_CREATE_CONTEXT 7
46#define DRM_VMW_UNREF_CONTEXT 8
47#define DRM_VMW_CREATE_SURFACE 9
48#define DRM_VMW_UNREF_SURFACE 10
49#define DRM_VMW_REF_SURFACE 11
50#define DRM_VMW_EXECBUF 12
51#define DRM_VMW_FIFO_DEBUG 13
52#define DRM_VMW_FENCE_WAIT 14
53
54
55/*************************************************************************/
56/**
57 * DRM_VMW_GET_PARAM - get device information.
58 *
59 * DRM_VMW_PARAM_FIFO_OFFSET:
60 * Offset to use to map the first page of the FIFO read-only.
61 * The fifo is mapped using the mmap() system call on the drm device.
62 *
63 * DRM_VMW_PARAM_OVERLAY_IOCTL:
64 * Does the driver support the overlay ioctl.
65 */
66
67#define DRM_VMW_PARAM_NUM_STREAMS 0
68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
69#define DRM_VMW_PARAM_3D 2
70#define DRM_VMW_PARAM_FIFO_OFFSET 3
71#define DRM_VMW_PARAM_HW_CAPS 4
72#define DRM_VMW_PARAM_FIFO_CAPS 5
73
74/**
75 * struct drm_vmw_getparam_arg
76 *
77 * @value: Returned value. //Out
78 * @param: Parameter to query. //In.
79 *
80 * Argument to the DRM_VMW_GET_PARAM Ioctl.
81 */
82
83struct drm_vmw_getparam_arg {
84 uint64_t value;
85 uint32_t param;
86 uint32_t pad64;
87};
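
A user-space sketch (not part of this header) of querying the 3D parameter. DRM_IOCTL_VMW_GET_PARAM is assumed to be the driver-defined ioctl number built from DRM_VMW_GET_PARAM, and @fd is an open drm device node.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_has_3d(int fd)
{
	struct drm_vmw_getparam_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.param = DRM_VMW_PARAM_3D;		/* In */
	if (ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) != 0)
		return 0;
	return arg.value != 0;			/* Out */
}
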
88
89/*************************************************************************/
90/**
91 * DRM_VMW_EXTENSION - Query device extensions.
92 */
93
94/**
95 * struct drm_vmw_extension_rep
96 *
97 * @exists: The queried extension exists.
98 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
99 * @driver_sarea_offset: Offset to any space in the DRI SAREA
100 * used by the extension.
101 * @major: Major version number of the extension.
102 * @minor: Minor version number of the extension.
103 * @pl: Patch level version number of the extension.
104 *
105 * Output argument to the DRM_VMW_EXTENSION Ioctl.
106 */
107
108struct drm_vmw_extension_rep {
109 int32_t exists;
110 uint32_t driver_ioctl_offset;
111 uint32_t driver_sarea_offset;
112 uint32_t major;
113 uint32_t minor;
114 uint32_t pl;
115 uint32_t pad64;
116};
117
118/**
119 * union drm_vmw_extension_arg
120 *
121 * @extension - Ascii name of the extension to be queried. //In
122 * @rep - Reply as defined above. //Out
123 *
124 * Argument to the DRM_VMW_EXTENSION Ioctl.
125 */
126
127union drm_vmw_extension_arg {
128 char extension[DRM_VMW_EXT_NAME_LEN];
129 struct drm_vmw_extension_rep rep;
130};
131
132/*************************************************************************/
133/**
134 * DRM_VMW_CREATE_CONTEXT - Create a host context.
135 *
136 * Allocates a device unique context id, and queues a create context command
137 * for the host. Does not wait for host completion.
138 */
139
140/**
141 * struct drm_vmw_context_arg
142 *
143 * @cid: Device unique context ID.
144 *
145 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
146 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
147 */
148
149struct drm_vmw_context_arg {
150 int32_t cid;
151 uint32_t pad64;
152};
153
154/*************************************************************************/
155/**
 156 * DRM_VMW_UNREF_CONTEXT - Free a host context.
157 *
158 * Frees a global context id, and queues a destroy host command for the host.
159 * Does not wait for host completion. The context ID can be used directly
160 * in the command stream and shows up as the same context ID on the host.
161 */
162
163/*************************************************************************/
164/**
 165 * DRM_VMW_CREATE_SURFACE - Create a host surface.
166 *
167 * Allocates a device unique surface id, and queues a create surface command
168 * for the host. Does not wait for host completion. The surface ID can be
169 * used directly in the command stream and shows up as the same surface
170 * ID on the host.
171 */
172
173/**
 174 * struct drm_vmw_surface_create_req
175 *
176 * @flags: Surface flags as understood by the host.
177 * @format: Surface format as understood by the host.
178 * @mip_levels: Number of mip levels for each face.
179 * An unused face should have 0 encoded.
 180 * @size_addr: Address of a user-space array of struct drm_vmw_size
181 * cast to an uint64_t for 32-64 bit compatibility.
182 * The size of the array should equal the total number of mipmap levels.
183 * @shareable: Boolean whether other clients (as identified by file descriptors)
184 * may reference this surface.
185 * @scanout: Boolean whether the surface is intended to be used as a
186 * scanout.
187 *
188 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
189 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
190 */
191
192struct drm_vmw_surface_create_req {
193 uint32_t flags;
194 uint32_t format;
195 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
196 uint64_t size_addr;
197 int32_t shareable;
198 int32_t scanout;
199};
200
201/**
 202 * struct drm_vmw_surface_arg
203 *
204 * @sid: Surface id of created surface or surface to destroy or reference.
205 *
206 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
207 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
208 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
209 */
210
211struct drm_vmw_surface_arg {
212 int32_t sid;
213 uint32_t pad64;
214};
215
216/**
 217 * struct drm_vmw_size
218 *
219 * @width - mip level width
220 * @height - mip level height
221 * @depth - mip level depth
222 *
223 * Description of a mip level.
 224 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
225 */
226
227struct drm_vmw_size {
228 uint32_t width;
229 uint32_t height;
230 uint32_t depth;
231 uint32_t pad64;
232};
233
234/**
235 * union drm_vmw_surface_create_arg
236 *
237 * @rep: Output data as described above.
238 * @req: Input data as described above.
239 *
240 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
241 */
242
243union drm_vmw_surface_create_arg {
244 struct drm_vmw_surface_arg rep;
245 struct drm_vmw_surface_create_req req;
246};
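
A hedged user-space sketch of filling in the request half of the union above for a single-face surface; sizes[] must contain one struct drm_vmw_size per mip level, and example_fill_surface_req is an illustrative helper, not part of the interface.

#include <stdint.h>
#include <string.h>

static void example_fill_surface_req(union drm_vmw_surface_create_arg *arg,
				     uint32_t format,
				     struct drm_vmw_size *sizes,
				     uint32_t mip_levels)
{
	memset(arg, 0, sizeof(*arg));
	arg->req.flags = 0;
	arg->req.format = format;
	arg->req.mip_levels[0] = mip_levels;	/* unused faces stay 0 */
	/* 32/64-bit safe pointer-to-uint64_t cast, as described above. */
	arg->req.size_addr = (uint64_t)(unsigned long)sizes;
	arg->req.shareable = 0;
	arg->req.scanout = 0;
}
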
247
248/*************************************************************************/
249/**
250 * DRM_VMW_REF_SURFACE - Reference a host surface.
251 *
 252 * Puts a reference on a host surface with a given sid, as previously
253 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
254 * A reference will make sure the surface isn't destroyed while we hold
255 * it and will allow the calling client to use the surface ID in the command
256 * stream.
257 *
258 * On successful return, the Ioctl returns the surface information given
259 * in the DRM_VMW_CREATE_SURFACE ioctl.
260 */
261
262/**
263 * union drm_vmw_surface_reference_arg
264 *
265 * @rep: Output data as described above.
266 * @req: Input data as described above.
267 *
268 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
269 */
270
271union drm_vmw_surface_reference_arg {
272 struct drm_vmw_surface_create_req rep;
273 struct drm_vmw_surface_arg req;
274};
275
276/*************************************************************************/
277/**
278 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
279 *
280 * Clear a reference previously put on a host surface.
281 * When all references are gone, including the one implicitly placed
 282 * on creation, a destroy surface command will be queued
 283 * for the host.
284 * Does not wait for completion.
285 */
286
287/*************************************************************************/
288/**
289 * DRM_VMW_EXECBUF
290 *
291 * Submit a command buffer for execution on the host, and return a
 292 * fence sequence that, when signaled, indicates that the command buffer has
293 * executed.
294 */
295
296/**
297 * struct drm_vmw_execbuf_arg
298 *
299 * @commands: User-space address of a command buffer cast to an uint64_t.
 300 * @command_size: Size in bytes of the command buffer.
 301 * @throttle_us: Sleep until software is less than @throttle_us
302 * microseconds ahead of hardware. The driver may round this value
303 * to the nearest kernel tick.
304 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
305 * uint64_t.
306 * @version: Allows expanding the execbuf ioctl parameters without breaking
307 * backwards compatibility, since user-space will always tell the kernel
308 * which version it uses.
309 * @flags: Execbuf flags. None currently.
310 *
311 * Argument to the DRM_VMW_EXECBUF Ioctl.
312 */
313
314#define DRM_VMW_EXECBUF_VERSION 0
315
316struct drm_vmw_execbuf_arg {
317 uint64_t commands;
318 uint32_t command_size;
319 uint32_t throttle_us;
320 uint64_t fence_rep;
321 uint32_t version;
322 uint32_t flags;
323};
324
325/**
326 * struct drm_vmw_fence_rep
327 *
328 * @fence_seq: Fence sequence associated with a command submission.
329 * @error: This member should've been set to -EFAULT on submission.
 330 * The following actions should be taken on completion:
331 * error == -EFAULT: Fence communication failed. The host is synchronized.
332 * Use the last fence id read from the FIFO fence register.
333 * error != 0 && error != -EFAULT:
334 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 335 * error == 0: All is OK. The host may not be synchronized.
336 * Use the fence_seq member.
337 *
338 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
339 */
340
341struct drm_vmw_fence_rep {
342 uint64_t fence_seq;
343 int32_t error;
344 uint32_t pad64;
345};
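
A user-space sketch tying the two structures above together under the protocol just described: @error is pre-set to -EFAULT before submission, and afterwards the caller decides which sequence to wait on. DRM_IOCTL_VMW_EXECBUF is assumed to be the driver-defined ioctl number for DRM_VMW_EXECBUF, and example_last_fifo_fence() stands in for reading the FIFO fence register.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_submit(int fd, void *cmd, uint32_t cmd_size,
			  struct drm_vmw_fence_rep *rep)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uint64_t)(unsigned long)cmd;
	arg.command_size = cmd_size;
	arg.fence_rep = (uint64_t)(unsigned long)rep;
	arg.version = DRM_VMW_EXECBUF_VERSION;

	rep->error = -EFAULT;	/* pre-set as required by the comment above */
	return ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
}

static uint64_t example_pick_fence(const struct drm_vmw_fence_rep *rep,
				   uint64_t (*example_last_fifo_fence)(void))
{
	if (rep->error == -EFAULT)	/* fence communication failed */
		return example_last_fifo_fence();
	return rep->fence_seq;		/* error == 0 or other errors */
}
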
346
347/*************************************************************************/
348/**
349 * DRM_VMW_ALLOC_DMABUF
350 *
351 * Allocate a DMA buffer that is visible also to the host.
352 * NOTE: The buffer is
353 * identified by a handle and an offset, which are private to the guest, but
 354 * usable in the command stream. The guest kernel may translate these
355 * and patch up the command stream accordingly. In the future, the offset may
356 * be zero at all times, or it may disappear from the interface before it is
357 * fixed.
358 *
359 * The DMA buffer may stay user-space mapped in the guest at all times,
360 * and is thus suitable for sub-allocation.
361 *
362 * DMA buffers are mapped using the mmap() syscall on the drm device.
363 */
364
365/**
366 * struct drm_vmw_alloc_dmabuf_req
367 *
368 * @size: Required minimum size of the buffer.
369 *
370 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
371 */
372
373struct drm_vmw_alloc_dmabuf_req {
374 uint32_t size;
375 uint32_t pad64;
376};
377
378/**
379 * struct drm_vmw_dmabuf_rep
380 *
381 * @map_handle: Offset to use in the mmap() call used to map the buffer.
382 * @handle: Handle unique to this buffer. Used for unreferencing.
383 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 384 * referenced. See note above.
385 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
386 * referenced. See note above.
387 *
388 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
389 */
390
391struct drm_vmw_dmabuf_rep {
392 uint64_t map_handle;
393 uint32_t handle;
394 uint32_t cur_gmr_id;
395 uint32_t cur_gmr_offset;
396 uint32_t pad64;
397};
398
399/**
 400 * union drm_vmw_alloc_dmabuf_arg
401 *
402 * @req: Input data as described above.
403 * @rep: Output data as described above.
404 *
405 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
406 */
407
408union drm_vmw_alloc_dmabuf_arg {
409 struct drm_vmw_alloc_dmabuf_req req;
410 struct drm_vmw_dmabuf_rep rep;
411};
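
A user-space sketch of the allocate-then-mmap() flow described above; DRM_IOCTL_VMW_ALLOC_DMABUF is assumed to be the driver-defined ioctl number for DRM_VMW_ALLOC_DMABUF, and the caller should compare the result against MAP_FAILED.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *example_alloc_and_map(int fd, uint32_t size, uint32_t *handle)
{
	union drm_vmw_alloc_dmabuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = size;
	if (ioctl(fd, DRM_IOCTL_VMW_ALLOC_DMABUF, &arg) != 0)
		return MAP_FAILED;

	*handle = arg.rep.handle;	/* keep for DRM_VMW_UNREF_DMABUF */
	/* map_handle is the mmap() offset into the drm device, as noted above. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)arg.rep.map_handle);
}
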
412
413/*************************************************************************/
414/**
415 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
416 *
417 */
418
419/**
420 * struct drm_vmw_unref_dmabuf_arg
421 *
422 * @handle: Handle indicating what buffer to free. Obtained from the
423 * DRM_VMW_ALLOC_DMABUF Ioctl.
424 *
425 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
426 */
427
428struct drm_vmw_unref_dmabuf_arg {
429 uint32_t handle;
430 uint32_t pad64;
431};
432
433/*************************************************************************/
434/**
435 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
436 *
437 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
438 */
439
440/**
441 * struct drm_vmw_fifo_debug_arg
442 *
443 * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
444 * @debug_buffer_size: Size in bytes of debug buffer //In
445 * @used_size: Number of bytes copied to the buffer // Out
446 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
447 *
448 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
449 */
450
451struct drm_vmw_fifo_debug_arg {
452 uint64_t debug_buffer;
453 uint32_t debug_buffer_size;
454 uint32_t used_size;
455 int32_t did_not_fit;
456 uint32_t pad64;
457};
458
459struct drm_vmw_fence_wait_arg {
460 uint64_t sequence;
461 uint64_t kernel_cookie;
462 int32_t cookie_valid;
463 int32_t pad64;
464};
465
466/*************************************************************************/
467/**
468 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
469 *
470 * This IOCTL controls the overlay units of the svga device.
 471 * The SVGA overlay units do not work like regular hardware units in
 472 * that they do not automatically read back the contents of the given dma
 473 * buffer. Instead, they only read back for each call to this ioctl, and
474 * at any point between this call being made and a following call that
475 * either changes the buffer or disables the stream.
476 */
477
478/**
479 * struct drm_vmw_rect
480 *
481 * Defines a rectangle. Used in the overlay ioctl to define
482 * source and destination rectangle.
483 */
484
485struct drm_vmw_rect {
486 int32_t x;
487 int32_t y;
488 uint32_t w;
489 uint32_t h;
490};
491
492/**
493 * struct drm_vmw_control_stream_arg
494 *
 495 * @stream_id: Stream to control.
496 * @enabled: If false all following arguments are ignored.
497 * @handle: Handle to buffer for getting data from.
498 * @format: Format of the overlay as understood by the host.
499 * @width: Width of the overlay.
500 * @height: Height of the overlay.
501 * @size: Size of the overlay in bytes.
 502 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
503 * @offset: Offset from start of dma buffer to overlay.
504 * @src: Source rect, must be within the defined area above.
505 * @dst: Destination rect, x and y may be negative.
506 *
507 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
508 */
509
510struct drm_vmw_control_stream_arg {
511 uint32_t stream_id;
512 uint32_t enabled;
513
514 uint32_t flags;
515 uint32_t color_key;
516
517 uint32_t handle;
518 uint32_t offset;
519 int32_t format;
520 uint32_t size;
521 uint32_t width;
522 uint32_t height;
523 uint32_t pitch[3];
524
525 uint32_t pad64;
526 struct drm_vmw_rect src;
527 struct drm_vmw_rect dst;
528};
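
An illustrative user-space helper that fills the argument above to enable a stream, sourcing the whole buffer and displaying it unscaled at (dst_x, dst_y); format and size are assumed to be device-specific values supplied by the caller.

#include <stdint.h>
#include <string.h>

static void example_enable_overlay(struct drm_vmw_control_stream_arg *arg,
				   uint32_t stream_id, uint32_t handle,
				   int32_t format, uint32_t width,
				   uint32_t height, uint32_t size,
				   uint32_t pitch0, int32_t dst_x, int32_t dst_y)
{
	memset(arg, 0, sizeof(*arg));
	arg->stream_id = stream_id;
	arg->enabled = 1;
	arg->handle = handle;
	arg->offset = 0;		/* overlay starts at buffer offset 0 */
	arg->format = format;
	arg->size = size;
	arg->width = width;
	arg->height = height;
	arg->pitch[0] = pitch0;		/* pitch[1]/pitch[2] only for YUV12 */
	arg->src.x = 0;
	arg->src.y = 0;
	arg->src.w = width;
	arg->src.h = height;
	arg->dst.x = dst_x;
	arg->dst.y = dst_y;
	arg->dst.w = width;
	arg->dst.h = height;
}
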
529
530/*************************************************************************/
531/**
532 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
533 *
534 */
535
536#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
537#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
538
539/**
540 * struct drm_vmw_cursor_bypass_arg
541 *
542 * @flags: Flags.
 543 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
544 * @xpos: X position of cursor.
545 * @ypos: Y position of cursor.
546 * @xhot: X hotspot.
547 * @yhot: Y hotspot.
548 *
549 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
550 */
551
552struct drm_vmw_cursor_bypass_arg {
553 uint32_t flags;
554 uint32_t crtc_id;
555 int32_t xpos;
556 int32_t ypos;
557 int32_t xhot;
558 int32_t yhot;
559};
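
A short user-space sketch of the "bypass on all crtcs" case; example_cursor_bypass_all is an illustrative helper that only fills the argument, and the actual DRM_VMW_CURSOR_BYPASS ioctl call is left to the caller.

#include <stdint.h>
#include <string.h>

static void example_cursor_bypass_all(struct drm_vmw_cursor_bypass_arg *arg,
				      int32_t xhot, int32_t yhot)
{
	memset(arg, 0, sizeof(*arg));
	arg->flags = DRM_VMW_CURSOR_BYPASS_ALL;	/* crtc_id is ignored */
	arg->xhot = xhot;
	arg->yhot = yhot;
}
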
560
561/*************************************************************************/
562/**
563 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
564 */
565
566/**
 567 * struct drm_vmw_stream_arg
 568 *
 569 * @stream_id: Device unique stream ID.
 570 *
 571 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 572 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
573 */
574
575struct drm_vmw_stream_arg {
576 uint32_t stream_id;
577 uint32_t pad64;
578};
579
580/*************************************************************************/
581/**
582 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
583 *
584 * Return a single stream that was claimed by this process. Also makes
585 * sure that the stream has been stopped.
586 */
587
588#endif