path: root/drivers/gpu/drm/i915/i915_drv.h
author		Eric Anholt <eric@anholt.net>	2008-07-30 15:06:12 -0400
committer	Dave Airlie <airlied@linux.ie>	2008-10-17 17:10:12 -0400
commit		673a394b1e3b69be886ff24abfd6df97c52e8d08 (patch)
tree		61ca8299333ab50ffc46cf328b20eb25133392ff /drivers/gpu/drm/i915/i915_drv.h
parent		d1d8c925b71dd6753bf438f9e14a9e5c5183bcc6 (diff)
drm: Add GEM ("graphics execution manager") to i915 driver.
GEM allows the creation of persistent buffer objects accessible by the
graphics device through new ioctls for managing execution of commands on
the device.  The userland API is almost entirely driver-specific to ensure
that any driver building on this model can easily map the interface to
individual driver requirements.

GEM is used by the 2d driver for managing its internal state allocations
and will be used for pixmap storage to reduce memory consumption and enable
zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to
enable GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	253
1 file changed, 249 insertions(+), 4 deletions(-)
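
For orientation (not part of this patch): a userspace client talks to GEM through the driver-specific ioctls this series introduces. The C fragment below is a minimal, illustrative sketch of allocating one buffer object with DRM_IOCTL_I915_GEM_CREATE and releasing it again. It assumes the uapi header <drm/i915_drm.h> is available (the include path differs between kernel and libdrm header installs), a /dev/dri/card0 node, and sufficient DRM privileges.

/* Illustrative only -- a sketch of exercising the new GEM create ioctl
 * from userspace; not code from this series. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/i915_drm.h>	/* struct drm_i915_gem_create, DRM_IOCTL_I915_GEM_CREATE */

int main(void)
{
	/* Device path is an assumption; GEM ioctls may also require DRM auth. */
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/dri/card0");
		return 1;
	}

	struct drm_i915_gem_create create;
	memset(&create, 0, sizeof(create));
	create.size = 4096;	/* one page of backing storage */

	/* On success the kernel fills in create.handle, the persistent
	 * buffer-object name later passed to pread/pwrite/mmap/execbuffer. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) < 0) {
		perror("DRM_IOCTL_I915_GEM_CREATE");
		close(fd);
		return 1;
	}
	printf("created GEM object: handle %u, %llu bytes\n",
	       create.handle, (unsigned long long)create.size);

	/* Handles are per-file; close the object explicitly rather than
	 * leaking it until the fd is closed. */
	struct drm_gem_close gem_close;
	memset(&gem_close, 0, sizeof(gem_close));
	gem_close.handle = create.handle;
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);

	close(fd);
	return 0;
}

The execution side (relocation and batchbuffer submission) follows the same ioctl pattern but is deliberately driver-specific, which is what the commit message above refers to.
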
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d1a02bead458..87b071ab8647 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -39,7 +39,7 @@
 
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20060119"
+#define DRIVER_DATE "20080730"
 
 enum pipe {
 	PIPE_A = 0,
@@ -60,16 +60,23 @@ enum pipe {
 #define DRIVER_MINOR 6
 #define DRIVER_PATCHLEVEL 0
 
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
 typedef struct _drm_i915_ring_buffer {
 	int tail_mask;
-	unsigned long Start;
-	unsigned long End;
 	unsigned long Size;
 	u8 *virtual_start;
 	int head;
 	int tail;
 	int space;
 	drm_local_map_t map;
+	struct drm_gem_object *ring_obj;
 } drm_i915_ring_buffer_t;
 
 struct mem_block {
@@ -101,6 +108,8 @@ struct intel_opregion {
 };
 
 typedef struct drm_i915_private {
+	struct drm_device *dev;
+
 	drm_local_map_t *sarea;
 	drm_local_map_t *mmio_map;
 
@@ -113,6 +122,7 @@ typedef struct drm_i915_private {
 	uint32_t counter;
 	unsigned int status_gfx_addr;
 	drm_local_map_t hws_map;
+	struct drm_gem_object *hws_obj;
 
 	unsigned int cpp;
 	int back_offset;
@@ -122,7 +132,6 @@ typedef struct drm_i915_private {
 
 	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
-	atomic_t irq_emitted;
 	/** Protects user_irq_refcount and irq_mask_reg */
 	spinlock_t user_irq_lock;
 	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
@@ -230,8 +239,174 @@ typedef struct drm_i915_private {
 	u8 saveDACMASK;
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
+
+	struct {
+		struct drm_mm gtt_space;
+
+		/**
+		 * List of objects currently involved in rendering from the
+		 * ringbuffer.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head active_list;
+
+		/**
+		 * List of objects which are not in the ringbuffer but which
+		 * still have a write_domain which needs to be flushed before
+		 * unbinding.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head flushing_list;
+
+		/**
+		 * LRU list of objects which are not in the ringbuffer and
+		 * are ready to unbind, but are still in the GTT.
+		 *
+		 * A reference is not held on the buffer while on this list,
+		 * as merely being GTT-bound shouldn't prevent its being
+		 * freed, and we'll pull it off the list in the free path.
+		 */
+		struct list_head inactive_list;
+
+		/**
+		 * List of breadcrumbs associated with GPU requests currently
+		 * outstanding.
+		 */
+		struct list_head request_list;
+
+		/**
+		 * We leave the user IRQ off as much as possible,
+		 * but this means that requests will finish and never
+		 * be retired once the system goes idle. Set a timer to
+		 * fire periodically while the ring is running. When it
+		 * fires, go retire requests.
+		 */
+		struct delayed_work retire_work;
+
+		uint32_t next_gem_seqno;
+
+		/**
+		 * Waiting sequence number, if any
+		 */
+		uint32_t waiting_gem_seqno;
+
+		/**
+		 * Last seq seen at irq time
+		 */
+		uint32_t irq_gem_seqno;
+
+		/**
+		 * Flag if the X Server, and thus DRM, is not currently in
+		 * control of the device.
+		 *
+		 * This is set between LeaveVT and EnterVT. It needs to be
+		 * replaced with a semaphore. It also needs to be
+		 * transitioned away from for kernel modesetting.
+		 */
+		int suspended;
+
+		/**
+		 * Flag if the hardware appears to be wedged.
+		 *
+		 * This is set when attempts to idle the device time out.
+		 * It prevents command submission from occurring and makes
+		 * every pending request fail.
+		 */
+		int wedged;
+
+		/** Bit 6 swizzling required for X tiling */
+		uint32_t bit_6_swizzle_x;
+		/** Bit 6 swizzling required for Y tiling */
+		uint32_t bit_6_swizzle_y;
+	} mm;
 } drm_i915_private_t;
 
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+	struct drm_gem_object *obj;
+
+	/** Current space allocated to this object in the GTT, if any. */
+	struct drm_mm_node *gtt_space;
+
+	/** This object's place on the active/flushing/inactive lists */
+	struct list_head list;
+
+	/**
+	 * This is set if the object is on the active or flushing lists
+	 * (has pending rendering), and is not set if it's on inactive (ready
+	 * to be unbound).
+	 */
+	int active;
+
+	/**
+	 * This is set if the object has been written to since last bound
+	 * to the GTT
+	 */
+	int dirty;
+
+	/** AGP memory structure for our GTT binding. */
+	DRM_AGP_MEM *agp_mem;
+
+	struct page **page_list;
+
+	/**
+	 * Current offset of the object in GTT space.
+	 *
+	 * This is the same as gtt_space->start
+	 */
+	uint32_t gtt_offset;
+
+	/** Boolean whether this object has a valid gtt offset. */
+	int gtt_bound;
+
+	/** How many users have pinned this object in GTT space */
+	int pin_count;
+
+	/** Breadcrumb of last rendering to the buffer. */
+	uint32_t last_rendering_seqno;
+
+	/** Current tiling mode for the object. */
+	uint32_t tiling_mode;
+
+	/**
+	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 */
+	uint8_t *page_cpu_valid;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+	/** GEM sequence number associated with this request. */
+	uint32_t seqno;
+
+	/** Time at which this request was emitted, in jiffies. */
+	unsigned long emitted_jiffies;
+
+	/** Cache domains that were flushed at the start of the request. */
+	uint32_t flush_domains;
+
+	struct list_head list;
+};
+
+struct drm_i915_file_private {
+	struct {
+		uint32_t last_gem_seqno;
+		uint32_t last_gem_throttle_seqno;
+	} mm;
+};
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
@@ -239,18 +414,26 @@ extern int i915_max_ioctl;
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
 extern void i915_driver_lastclose(struct drm_device * dev);
 extern void i915_driver_preclose(struct drm_device *dev,
 				 struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+				  struct drm_file *file_priv);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
+extern int i915_emit_box(struct drm_device *dev,
+			 struct drm_clip_rect __user *boxes,
+			 int i, int DR1, int DR4);
 
 /* i915_irq.c */
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
+void i915_user_irq_get(struct drm_device *dev);
+void i915_user_irq_put(struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -279,6 +462,67 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
 extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
 			     struct drm_file *file_priv, struct mem_block *heap);
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
+
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev, file, line)
+#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+void i915_dump_lru(struct drm_device *dev, const char *where);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
@@ -347,6 +591,7 @@ extern void opregion_enable_asle(struct drm_device *dev);
  */
 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
+#define I915_GEM_HWS_INDEX 0x10
 
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 