author    Eric Anholt <eric@anholt.net>  2008-07-30 15:06:12 -0400
committer Dave Airlie <airlied@linux.ie> 2008-10-17 17:10:12 -0400
commit    673a394b1e3b69be886ff24abfd6df97c52e8d08
tree      61ca8299333ab50ffc46cf328b20eb25133392ff /include
parent    d1d8c925b71dd6753bf438f9e14a9e5c5183bcc6
drm: Add GEM ("graphics execution manager") to i915 driver.
GEM allows the creation of persistent buffer objects accessible by the
graphics device through new ioctls for managing execution of commands on
the device. The userland API is almost entirely driver-specific to ensure
that any driver building on this model can easily map the interface to
individual driver requirements.

GEM is used by the 2d driver for managing its internal state allocations
and will be used for pixmap storage to reduce memory consumption and enable
zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to
enable GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'include')
-rw-r--r--   include/drm/drm.h        31
-rw-r--r--   include/drm/drmP.h      151
-rw-r--r--   include/drm/i915_drm.h  332
3 files changed, 514 insertions(+), 0 deletions(-)
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 15e55039b7f1..f46ba4b57da4 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -570,6 +570,34 @@ struct drm_set_version {
         int drm_dd_minor;
 };
 
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+        /** Handle of the object to be closed. */
+        uint32_t handle;
+        uint32_t pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+        /** Handle for the object being named */
+        uint32_t handle;
+
+        /** Returned global name */
+        uint32_t name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+        /** Name of object being opened */
+        uint32_t name;
+
+        /** Returned handle for the object */
+        uint32_t handle;
+
+        /** Returned size of the object */
+        uint64_t size;
+};
+
 #define DRM_IOCTL_BASE                  'd'
 #define DRM_IO(nr)                      _IO(DRM_IOCTL_BASE,nr)
 #define DRM_IOR(nr,type)                _IOR(DRM_IOCTL_BASE,nr,type)
@@ -585,6 +613,9 @@ struct drm_set_version {
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION           DRM_IOWR(0x07, struct drm_set_version)
 #define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE             DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK             DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN              DRM_IOWR(0x0b, struct drm_gem_open)
 
 #define DRM_IOCTL_SET_UNIQUE            DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC            DRM_IOW( 0x11, struct drm_auth)
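The three generic ioctls above are the whole cross-driver naming interface: flink turns a per-fd handle into a global name, open turns that name back into a handle on another fd, and close drops a handle. Here is a minimal userspace sketch of that flow (not part of the patch); fd_a, fd_b, handle, and the share_gem_object helper are hypothetical, and the drm.h include path depends on how the headers are installed.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>          /* include path may differ per installation */

/* Share a GEM object created on fd_a with a second DRM client on fd_b. */
static int share_gem_object(int fd_a, int fd_b, uint32_t handle,
                            uint32_t *handle_b, uint64_t *size_b)
{
        struct drm_gem_flink flink;
        struct drm_gem_open open_arg;
        struct drm_gem_close close_arg;

        /* Ask the kernel for a global name for the object. */
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;
        if (ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink) != 0)
                return -1;

        /* Open the named object on the second fd; a new handle comes back. */
        memset(&open_arg, 0, sizeof(open_arg));
        open_arg.name = flink.name;
        if (ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg) != 0)
                return -1;
        *handle_b = open_arg.handle;
        *size_b = open_arg.size;

        /* Drop the original handle; fd_b's handle keeps the object alive. */
        memset(&close_arg, 0, sizeof(close_arg));
        close_arg.handle = handle;
        return ioctl(fd_a, DRM_IOCTL_GEM_CLOSE, &close_arg);
}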
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e79ce0781f0b..1469a1bd8821 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,7 @@ struct drm_device;
 #define DRIVER_DMA_QUEUE   0x200
 #define DRIVER_FB_DMA      0x400
 #define DRIVER_IRQ_VBL2    0x800
+#define DRIVER_GEM         0x1000
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -387,6 +388,10 @@ struct drm_file {
         struct drm_minor *minor;
         int remove_auth_on_close;
         unsigned long lock_count;
+        /** Mapping of mm object handles to object pointers. */
+        struct idr object_idr;
+        /** Lock for synchronization of access to object_idr. */
+        spinlock_t table_lock;
         struct file *filp;
         void *driver_priv;
 };
@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
 };
 
 /**
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+        /** Reference count of this object */
+        struct kref refcount;
+
+        /** Handle count of this object. Each handle also holds a reference */
+        struct kref handlecount;
+
+        /** Related drm device */
+        struct drm_device *dev;
+
+        /** File representing the shmem storage */
+        struct file *filp;
+
+        /**
+         * Size of the object, in bytes. Immutable over the object's
+         * lifetime.
+         */
+        size_t size;
+
+        /**
+         * Global name for this object, starts at 1. 0 means unnamed.
+         * Access is covered by the object_name_lock in the related drm_device
+         */
+        int name;
+
+        /**
+         * Memory domains. These monitor which caches contain read/write data
+         * related to the object. When transitioning from one set of domains
+         * to another, the driver is called to ensure that caches are suitably
+         * flushed and invalidated
+         */
+        uint32_t read_domains;
+        uint32_t write_domain;
+
+        /**
+         * While validating an exec operation, the
+         * new read/write domain values are computed here.
+         * They will be transferred to the above values
+         * at the point that any cache flushing occurs
+         */
+        uint32_t pending_read_domains;
+        uint32_t pending_write_domain;
+
+        void *driver_private;
+};
+
+/**
  * DRM driver structure. This structure represent the common code for
  * a family of cards. There will one drm_device for each card present
  * in this family
@@ -657,6 +712,18 @@ struct drm_driver {
         void (*set_version) (struct drm_device *dev,
                              struct drm_set_version *sv);
 
+        int (*proc_init)(struct drm_minor *minor);
+        void (*proc_cleanup)(struct drm_minor *minor);
+
+        /**
+         * Driver-specific constructor for drm_gem_objects, to set up
+         * obj->driver_private.
+         *
+         * Returns 0 on success.
+         */
+        int (*gem_init_object) (struct drm_gem_object *obj);
+        void (*gem_free_object) (struct drm_gem_object *obj);
+
         int major;
         int minor;
         int patchlevel;
@@ -830,6 +897,22 @@ struct drm_device {
         spinlock_t drw_lock;
         struct idr drw_idr;
         /*@} */
+
+        /** \name GEM information */
+        /*@{ */
+        spinlock_t object_name_lock;
+        struct idr object_name_idr;
+        atomic_t object_count;
+        atomic_t object_memory;
+        atomic_t pin_count;
+        atomic_t pin_memory;
+        atomic_t gtt_count;
+        atomic_t gtt_memory;
+        uint32_t gtt_total;
+        uint32_t invalidate_domains;    /* domains pending invalidation */
+        uint32_t flush_domains;         /* domains pending flush */
+        /*@} */
+
 };
 
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
@@ -926,6 +1009,10 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+                                       struct page **pages,
+                                       unsigned long num_pages,
+                                       uint32_t gtt_offset);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
                                 /* Misc. IOCTL support (drm_ioctl.h) */
@@ -988,6 +1075,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
 extern int drm_authmagic(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
 
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+
                                 /* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct drm_device *dev, void *data,
                     struct drm_file *file_priv);
@@ -1094,6 +1184,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
 
                                 /* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -1156,6 +1247,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_object_free(struct kref *kref);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+                                            size_t size);
+void drm_gem_object_handle_free(struct kref *kref);
+
+static inline void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+        kref_get(&obj->refcount);
+}
+
+static inline void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+        if (obj == NULL)
+                return;
+
+        kref_put(&obj->refcount, drm_gem_object_free);
+}
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+                          struct drm_gem_object *obj,
+                          int *handlep);
+
+static inline void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+        drm_gem_object_reference(obj);
+        kref_get(&obj->handlecount);
+}
+
+static inline void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+{
+        if (obj == NULL)
+                return;
+
+        /*
+         * Must bump handle count first as this may be the last
+         * ref, in which case the object would disappear before we
+         * checked for a name
+         */
+        kref_put(&obj->handlecount, drm_gem_object_handle_free);
+        drm_gem_object_unreference(obj);
+}
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+                                             struct drm_file *filp,
+                                             int handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
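Taken together, the drmP.h additions give a driver the common create path: allocate a shmem-backed object, attach a per-file handle, then drop the allocation reference so the handle owns the object. The sketch below illustrates that pattern; example_gem_create, its rounding, and its error handling are assumptions, not code from this patch.

/* Illustrative only: how a driver ioctl might use the GEM helpers above. */
static int example_gem_create(struct drm_device *dev,
                              struct drm_file *file_priv,
                              uint64_t size, int *handlep)
{
        struct drm_gem_object *obj;
        int ret;

        /* Allocate the object; the caller starts with one reference. */
        obj = drm_gem_object_alloc(dev, roundup(size, PAGE_SIZE));
        if (obj == NULL)
                return -ENOMEM;

        /* Create a per-file handle; the handle holds its own reference. */
        ret = drm_gem_handle_create(file_priv, obj, handlep);

        /* Drop the allocation reference; the handle keeps the object alive. */
        drm_gem_object_unreference(obj);

        return ret;
}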
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 05c66cf03a9e..59d08fca25a4 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GET_VBLANK_PIPE       0x0e
 #define DRM_I915_VBLANK_SWAP           0x0f
 #define DRM_I915_HWS_ADDR              0x11
+#define DRM_I915_GEM_INIT              0x13
+#define DRM_I915_GEM_EXECBUFFER        0x14
+#define DRM_I915_GEM_PIN               0x15
+#define DRM_I915_GEM_UNPIN             0x16
+#define DRM_I915_GEM_BUSY              0x17
+#define DRM_I915_GEM_THROTTLE          0x18
+#define DRM_I915_GEM_ENTERVT           0x19
+#define DRM_I915_GEM_LEAVEVT           0x1a
+#define DRM_I915_GEM_CREATE            0x1b
+#define DRM_I915_GEM_PREAD             0x1c
+#define DRM_I915_GEM_PWRITE            0x1d
+#define DRM_I915_GEM_MMAP              0x1e
+#define DRM_I915_GEM_SET_DOMAIN        0x1f
+#define DRM_I915_GEM_SW_FINISH         0x20
+#define DRM_I915_GEM_SET_TILING        0x21
+#define DRM_I915_GEM_GET_TILING        0x22
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_GEM_PIN         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY        DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE    DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT     DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT     DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP        DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING  DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING  DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -200,6 +230,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_IRQ_ACTIVE            1
 #define I915_PARAM_ALLOW_BATCHBUFFER     2
 #define I915_PARAM_LAST_DISPATCH         3
+#define I915_PARAM_HAS_GEM               5
 
 typedef struct drm_i915_getparam {
         int param;
@@ -267,4 +298,305 @@ typedef struct drm_i915_hws_addr {
         uint64_t addr;
 } drm_i915_hws_addr_t;
 
+struct drm_i915_gem_init {
+        /**
+         * Beginning offset in the GTT to be managed by the DRM memory
+         * manager.
+         */
+        uint64_t gtt_start;
+        /**
+         * Ending offset in the GTT to be managed by the DRM memory
+         * manager.
+         */
+        uint64_t gtt_end;
+};
+
+struct drm_i915_gem_create {
+        /**
+         * Requested size for the object.
+         *
+         * The (page-aligned) allocated size for the object will be returned.
+         */
+        uint64_t size;
+        /**
+         * Returned handle for the object.
+         *
+         * Object handles are nonzero.
+         */
+        uint32_t handle;
+        uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+        /** Handle for the object being read. */
+        uint32_t handle;
+        uint32_t pad;
+        /** Offset into the object to read from */
+        uint64_t offset;
+        /** Length of data to read */
+        uint64_t size;
+        /**
+         * Pointer to write the data into.
+         *
+         * This is a fixed-size type for 32/64 compatibility.
+         */
+        uint64_t data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+        /** Handle for the object being written to. */
+        uint32_t handle;
+        uint32_t pad;
+        /** Offset into the object to write to */
+        uint64_t offset;
+        /** Length of data to write */
+        uint64_t size;
+        /**
+         * Pointer to read the data from.
+         *
+         * This is a fixed-size type for 32/64 compatibility.
+         */
+        uint64_t data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+        /** Handle for the object being mapped. */
+        uint32_t handle;
+        uint32_t pad;
+        /** Offset in the object to map. */
+        uint64_t offset;
+        /**
+         * Length of data to map.
+         *
+         * The value will be page-aligned.
+         */
+        uint64_t size;
+        /**
+         * Returned pointer the data was mapped at.
+         *
+         * This is a fixed-size type for 32/64 compatibility.
+         */
+        uint64_t addr_ptr;
+};
+
+struct drm_i915_gem_set_domain {
+        /** Handle for the object */
+        uint32_t handle;
+
+        /** New read domains */
+        uint32_t read_domains;
+
+        /** New write domain */
+        uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+        /** Handle for the object */
+        uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+        /**
+         * Handle of the buffer being pointed to by this relocation entry.
+         *
+         * It's appealing to make this be an index into the mm_validate_entry
+         * list to refer to the buffer, but this allows the driver to create
+         * a relocation list for state buffers and not re-write it per
+         * exec using the buffer.
+         */
+        uint32_t target_handle;
+
+        /**
+         * Value to be added to the offset of the target buffer to make up
+         * the relocation entry.
+         */
+        uint32_t delta;
+
+        /** Offset in the buffer the relocation entry will be written into */
+        uint64_t offset;
+
+        /**
+         * Offset value of the target buffer that the relocation entry was last
+         * written as.
+         *
+         * If the buffer has the same offset as last time, we can skip syncing
+         * and writing the relocation. This value is written back out by
+         * the execbuffer ioctl when the relocation is written.
+         */
+        uint64_t presumed_offset;
+
+        /**
+         * Target memory domains read by this operation.
+         */
+        uint32_t read_domains;
+
+        /**
+         * Target memory domains written by this operation.
+         *
+         * Note that only one domain may be written by the whole
+         * execbuffer operation, so that where there are conflicts,
+         * the application will get -EINVAL back.
+         */
+        uint32_t write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU             0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER          0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER         0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND         0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION     0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX          0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT             0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+        /**
+         * User's handle for a buffer to be bound into the GTT for this
+         * operation.
+         */
+        uint32_t handle;
+
+        /** Number of relocations to be performed on this buffer */
+        uint32_t relocation_count;
+        /**
+         * Pointer to array of struct drm_i915_gem_relocation_entry containing
+         * the relocations to be performed in this buffer.
+         */
+        uint64_t relocs_ptr;
+
+        /** Required alignment in graphics aperture */
+        uint64_t alignment;
+
+        /**
+         * Returned value of the updated offset of the object, for future
+         * presumed_offset writes.
+         */
+        uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+        /**
+         * List of buffers to be validated with their relocations to be
+         * performend on them.
+         *
+         * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+         *
+         * These buffers must be listed in an order such that all relocations
+         * a buffer is performing refer to buffers that have already appeared
+         * in the validate list.
+         */
+        uint64_t buffers_ptr;
+        uint32_t buffer_count;
+
+        /** Offset in the batchbuffer to start execution from. */
+        uint32_t batch_start_offset;
+        /** Bytes used in batchbuffer from batch_start_offset */
+        uint32_t batch_len;
+        uint32_t DR1;
+        uint32_t DR4;
+        uint32_t num_cliprects;
+        /** This is a struct drm_clip_rect *cliprects */
+        uint64_t cliprects_ptr;
+};
+
+struct drm_i915_gem_pin {
+        /** Handle of the buffer to be pinned. */
+        uint32_t handle;
+        uint32_t pad;
+
+        /** alignment required within the aperture */
+        uint64_t alignment;
+
+        /** Returned GTT offset of the buffer. */
+        uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+        /** Handle of the buffer to be unpinned. */
+        uint32_t handle;
+        uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+        /** Handle of the buffer to check for busy */
+        uint32_t handle;
+
+        /** Return busy status (1 if busy, 0 if idle) */
+        uint32_t busy;
+};
+
+#define I915_TILING_NONE        0
+#define I915_TILING_X           1
+#define I915_TILING_Y           2
+
+#define I915_BIT_6_SWIZZLE_NONE         0
+#define I915_BIT_6_SWIZZLE_9            1
+#define I915_BIT_6_SWIZZLE_9_10         2
+#define I915_BIT_6_SWIZZLE_9_11         3
+#define I915_BIT_6_SWIZZLE_9_10_11      4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN      5
+
+struct drm_i915_gem_set_tiling {
+        /** Handle of the buffer to have its tiling state updated */
+        uint32_t handle;
+
+        /**
+         * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+         * I915_TILING_Y).
+         *
+         * This value is to be set on request, and will be updated by the
+         * kernel on successful return with the actual chosen tiling layout.
+         *
+         * The tiling mode may be demoted to I915_TILING_NONE when the system
+         * has bit 6 swizzling that can't be managed correctly by GEM.
+         *
+         * Buffer contents become undefined when changing tiling_mode.
+         */
+        uint32_t tiling_mode;
+
+        /**
+         * Stride in bytes for the object when in I915_TILING_X or
+         * I915_TILING_Y.
+         */
+        uint32_t stride;
+
+        /**
+         * Returned address bit 6 swizzling required for CPU access through
+         * mmap mapping.
+         */
+        uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+        /** Handle of the buffer to get tiling state for. */
+        uint32_t handle;
+
+        /**
+         * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+         * I915_TILING_Y).
+         */
+        uint32_t tiling_mode;
+
+        /**
+         * Returned address bit 6 swizzling required for CPU access through
+         * mmap mapping.
+         */
+        uint32_t swizzle_mode;
+};
+
 #endif /* _I915_DRM_H_ */
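As a closing illustration, a minimal userspace sketch (not part of the patch) of the i915 create/pwrite flow using the structures above; fd, upload_buffer, and the include paths are assumptions.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/i915_drm.h>     /* include paths may differ per installation */

/* Create a GEM object on an i915 device and fill it with caller data. */
static int upload_buffer(int fd, const void *data, uint64_t len,
                         uint32_t *handle_out)
{
        struct drm_i915_gem_create create;
        struct drm_i915_gem_pwrite pwrite;

        /* Ask the kernel for a buffer; the size is rounded up to a page. */
        memset(&create, 0, sizeof(create));
        create.size = len;
        if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
                return -1;

        /* Copy the data into the object through the pwrite path. */
        memset(&pwrite, 0, sizeof(pwrite));
        pwrite.handle = create.handle;
        pwrite.offset = 0;
        pwrite.size = len;
        pwrite.data_ptr = (uint64_t)(uintptr_t)data;
        if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite) != 0)
                return -1;

        *handle_out = create.handle;
        return 0;
}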