Diffstat (limited to 'include')
 -rw-r--r--  include/drm/drmP.h                  |  93
 -rw-r--r--  include/drm/drm_crtc.h              |   9
 -rw-r--r--  include/drm/drm_pciids.h            |   4
 -rw-r--r--  include/drm/nouveau_drm.h           |   5
 -rw-r--r--  include/drm/ttm/ttm_bo_api.h        |  50
 -rw-r--r--  include/drm/ttm/ttm_bo_driver.h     | 152
 -rw-r--r--  include/drm/ttm/ttm_execbuf_util.h  |  11
 -rw-r--r--  include/linux/kref.h                |   2
 8 files changed, 265 insertions(+), 61 deletions(-)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 628f76772d22..0f14f94ed8f4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -683,6 +683,21 @@ struct drm_master {
 	void *driver_priv; /**< Private structure for driver to use */
 };
 
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL             (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID        (1 << 0)
+#define DRM_SCANOUTPOS_INVBL        (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
+
 /**
  * DRM driver structure. This structure represent the common code for
  * a family of cards. There will one drm_device for each card present
@@ -760,6 +775,68 @@ struct drm_driver {
 	 */
 	int (*device_is_agp) (struct drm_device *dev);
 
+	/**
+	 * Called by vblank timestamping code.
+	 *
+	 * Return the current display scanout position from a crtc.
+	 *
+	 * \param dev  DRM device.
+	 * \param crtc Id of the crtc to query.
+	 * \param *vpos Target location for current vertical scanout position.
+	 * \param *hpos Target location for current horizontal scanout position.
+	 *
+	 * Returns vpos as a positive number while in active scanout area.
+	 * Returns vpos as a negative number inside vblank, counting the number
+	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+	 * until start of active scanout / end of vblank."
+	 *
+	 * \return Flags, or'ed together as follows:
+	 *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+	 * DRM_SCANOUTPOS_INVBL = Inside vblank.
+	 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+	 * this flag means that returned position may be offset by a constant
+	 * but unknown small number of scanlines wrt. real scanout position.
+	 *
+	 */
+	int (*get_scanout_position) (struct drm_device *dev, int crtc,
+				     int *vpos, int *hpos);
+
+	/**
+	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
+	 * timestamp when the most recent VBLANK interval ended or will end.
+	 *
+	 * Specifically, the timestamp in @vblank_time should correspond as
+	 * closely as possible to the time when the first video scanline of
+	 * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after end of the VBLANK interval. If the
+	 * @crtc is currently inside VBLANK, this will be a time in the future.
+	 * If the @crtc is currently scanning out a frame, this will be the
+	 * past start time of the current scanout. This is meant to adhere
+	 * to the OpenML OML_sync_control extension specification.
+	 *
+	 * \param dev DRM device handle.
+	 * \param crtc crtc for which timestamp should be returned.
+	 * \param *max_error Maximum allowable timestamp error in nanoseconds.
+	 *                   Implementation should strive to provide timestamp
+	 *                   with an error of at most *max_error nanoseconds.
+	 *                   Returns true upper bound on error for timestamp.
+	 * \param *vblank_time Target location for returned vblank timestamp.
+	 * \param flags 0 = Defaults, no special treatment needed.
+	 * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+	 *        irq handler. Some drivers need to apply some workarounds
+	 *        for gpu-specific vblank irq quirks if flag is set.
+	 *
+	 * \returns
+	 * Zero if timestamping isn't supported in current display mode or a
+	 * negative number on failure. A positive status code on success,
+	 * which describes how the vblank_time timestamp was computed.
+	 */
+	int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags);
+
 	/* these have to be filled in */
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
@@ -983,6 +1060,8 @@ struct drm_device {
 
 	wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
 	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
 	spinlock_t vbl_lock;
 	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
 	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
@@ -1282,11 +1361,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data,
 				   struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+				     struct timeval *vblanktime);
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+				     struct timeval *tvblank, unsigned flags);
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+						 int crtc, int *max_error,
+						 struct timeval *vblank_time,
+						 unsigned flags,
+						 struct drm_crtc *refcrtc);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1337,6 +1427,9 @@ extern void drm_put_dev(struct drm_device *dev);
 extern int drm_put_minor(struct drm_minor **minor);
 extern unsigned int drm_debug;
 
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 extern struct dentry *drm_debugfs_root;
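
The new driver hooks and the drm_calc_vbltimestamp_from_scanoutpos() helper are designed to be combined: a driver that can report its scanout position can delegate timestamp computation to the core. Below is a minimal sketch of that wiring, illustrative only; the foo_* names are hypothetical and not part of this patch, and it assumes the driver already implements a scanout-position query returning the DRM_SCANOUTPOS_* flags above.

/* Hypothetical driver glue: let the DRM core derive the vblank timestamp
 * from the scanout position reported by the driver's own hook.
 */
static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	/* foo_crtc_from_index() is a hypothetical lookup helper. */
	struct drm_crtc *drmcrtc = foo_crtc_from_index(dev, crtc);

	if (!drmcrtc)
		return -EINVAL;

	/* Let the core compute the timestamp from the scanout position. */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

static struct drm_driver foo_driver = {
	/* ... */
	.get_scanout_position = foo_get_scanout_position, /* hypothetical, returns DRM_SCANOUTPOS_* flags */
	.get_vblank_timestamp = foo_get_vblank_timestamp,
	/* ... */
};
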
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 029aa688e787..acd7fade160d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -351,8 +351,14 @@ struct drm_crtc {
 
 	bool enabled;
 
+	/* Requested mode from modesetting. */
 	struct drm_display_mode mode;
 
+	/* Programmed mode in hw, after adjustments for encoders,
+	 * crtc, panel scaling etc. Needed for timestamping etc.
+	 */
+	struct drm_display_mode hwmode;
+
 	int x, y;
 	const struct drm_crtc_funcs *funcs;
 
@@ -360,6 +366,9 @@ struct drm_crtc {
 	uint32_t gamma_size;
 	uint16_t *gamma_store;
 
+	/* Constants needed for precise vblank and swap timestamping. */
+	s64 framedur_ns, linedur_ns, pixeldur_ns;
+
 	/* if you are using the helper */
 	void *helper_private;
 };
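
The new framedur_ns/linedur_ns/pixeldur_ns fields are filled in from the programmed hwmode by drm_calc_timestamping_constants(). Roughly, the arithmetic looks like the sketch below; this is an illustration under simplifying assumptions, and the real helper additionally corrects for interlaced and doublescan modes.

/* Sketch only: mode->clock is the dot clock in kHz, so one pixel takes
 * 1,000,000 / clock nanoseconds.
 */
static void foo_sketch_timing_constants(struct drm_crtc *crtc)
{
	struct drm_display_mode *mode = &crtc->hwmode;

	if (!mode->clock)
		return;

	crtc->pixeldur_ns = div64_s64(1000000LL, mode->clock);
	crtc->linedur_ns  = div64_s64(1000000LL * mode->crtc_htotal,
				      mode->clock);
	crtc->framedur_ns = div64_s64(1000000LL * mode->crtc_htotal *
				      mode->crtc_vtotal, mode->clock);
}
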
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 883c1d439899..e6b28a39942f 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -419,6 +419,10 @@
 	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b1a1ac..e2cfe80f6fca 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@ struct drm_nouveau_gpuobj_free {
 #define NOUVEAU_GETPARAM_PCI_VENDOR      3
 #define NOUVEAU_GETPARAM_PCI_DEVICE      4
 #define NOUVEAU_GETPARAM_BUS_TYPE        5
-#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
-#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
 #define NOUVEAU_GETPARAM_FB_SIZE         8
 #define NOUVEAU_GETPARAM_AGP_SIZE        9
-#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
 #define NOUVEAU_GETPARAM_GRAPH_UNITS     13
 #define NOUVEAU_GETPARAM_PTIMER_TIME     14
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
 struct drm_nouveau_getparam {
 	uint64_t param;
 	uint64_t value;
@@ -171,7 +169,6 @@ struct drm_nouveau_gem_pushbuf {
 };
 
 #define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
 #define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
 struct drm_nouveau_gem_cpu_prep {
 	uint32_t handle;
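
Userspace discovers the new HAS_PAGEFLIP capability through the existing getparam ioctl. A hedged libdrm-style sketch follows; nouveau_has_pageflip() is illustrative, while drmCommandWriteRead() and DRM_NOUVEAU_GETPARAM come from libdrm and the nouveau UAPI.

#include <drm/nouveau_drm.h>
#include <xf86drm.h>

/* Returns 1 if the kernel advertises page flipping, 0 if not,
 * or a negative errno on failure. Sketch only.
 */
static int nouveau_has_pageflip(int fd)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_HAS_PAGEFLIP,
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
				  &gp, sizeof(gp));
	if (ret)
		return ret;

	return gp.value != 0;
}
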
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a535..50852aad260a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@ struct ttm_placement {
  * @is_iomem: is this io memory ?
  * @size: size in byte
  * @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
@@ -83,7 +85,8 @@ struct ttm_bus_placement {
 	unsigned long size;
 	unsigned long offset;
 	bool is_iomem;
-	bool io_reserved;
+	bool io_reserved_vm;
+	uint64_t io_reserved_count;
 };
 
 
@@ -154,7 +157,6 @@ struct ttm_tt;
  * keeps one refcount. When this refcount reaches zero,
  * the object is destroyed.
  * @event_queue: Queue for processes waiting on buffer object status change.
- * @lock: spinlock protecting mostly synchronization members.
  * @mem: structure describing current placement.
  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@ struct ttm_buffer_object {
 	struct kref kref;
 	struct kref list_kref;
 	wait_queue_head_t event_queue;
-	spinlock_t lock;
 
 	/**
 	 * Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@ struct ttm_buffer_object {
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
+	struct list_head io_reserve_lru;
 	uint32_t val_seq;
 	bool seq_valid;
 
@@ -248,10 +250,10 @@ struct ttm_buffer_object {
 	atomic_t reserved;
 
 	/**
-	 * Members protected by the bo::lock
+	 * Members protected by struct buffer_object_device::fence_lock
 	 * In addition, setting sync_obj to anything else
 	 * than NULL requires bo::reserved to be held. This allows for
-	 * checking NULL while reserved but not holding bo::lock.
+	 * checking NULL while reserved but not holding the mentioned lock.
 	 */
 
 	void *sync_obj_arg;
@@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+				bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms) to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called just immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
 /**
  * ttm_bo_lock_delayed_workqueue
  *
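
The LRU helpers are meant to be used together with the reservation path: take the global lru_lock, reserve, pull the bo off the LRU lists, then drop the accumulated list references outside the spinlock. The following is a sketch of that pattern under the assumption that it mirrors what ttm_bo_reserve() itself does internally; foo_reserve_and_remove_from_lru() is a hypothetical name.

/* Sketch: reserve a bo and take it off the LRU lists, dropping the
 * list references outside of the spinlock via ttm_bo_list_ref_sub().
 */
static int foo_reserve_and_remove_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, true, false, false, 0);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/* Drop the lru list references accumulated while locked. */
	ttm_bo_list_ref_sub(bo, put_count, true);

	return ret;
}
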
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b6..1da8af6ac884 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@ struct ttm_tt {
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
 
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func {
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
 
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
 struct ttm_mem_type_manager {
 	struct ttm_bo_device *bdev;
 
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager {
 	uint32_t default_caching;
 	const struct ttm_mem_type_manager_func *func;
 	void *priv;
+	struct mutex io_reserve_mutex;
+	bool use_io_reserve_lru;
+	bool io_reserve_fastpath;
+
+	/*
+	 * Protected by @io_reserve_mutex:
+	 */
+
+	struct list_head io_reserve_lru;
 
 	/*
 	 * Protected by the global->lru_lock.
@@ -510,9 +525,12 @@ struct ttm_bo_global {
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
+ * @val_seq: Current validation sequence.
  * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
  * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@ struct ttm_bo_device {
 	struct ttm_bo_driver *driver;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	spinlock_t fence_lock;
 	/*
 	 * Protected by the vm lock.
 	 */
@@ -541,6 +560,7 @@ struct ttm_bo_device {
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
+	uint32_t val_seq;
 
 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
@@ -753,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 
 extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
 
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-			     struct ttm_mem_reg *mem,
-			     unsigned long *bus_base,
-			     unsigned long *bus_offset,
-			     unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-			      struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
-			    struct ttm_mem_reg *mem);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -810,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unmap_virtual_locked
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+			   bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
  * ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
@@ -859,11 +870,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  * try again. (only if use_sequence == 1).
  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
  */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+				 bool interruptible,
+				 bool no_wait, bool use_sequence,
+				 uint32_t sequence);
+
 /**
  * ttm_bo_unreserve
  *
@@ -874,6 +918,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
  * ttm_bo_wait_unreserved
  *
  * @bo: A pointer to a struct ttm_buffer_object.
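
The *_locked mapping teardown pairs with the new ttm_mem_io_lock()/ttm_mem_io_unlock() calls on the memory type manager, as the kerneldoc above requires. A sketch of the expected calling sequence follows; foo_unmap_bo() is illustrative only.

/* Sketch: tear down userspace mappings of a bo while holding the
 * manager's io_reserve lock, as required by the new *_locked variant.
 */
static int foo_unmap_bo(struct ttm_buffer_object *bo)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	int ret;

	ret = ttm_mem_io_lock(man, true /* interruptible */);
	if (ret)
		return ret;

	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);

	return 0;
}
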
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..26cc7f9ffa41 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
  * @bo: refcounted buffer object pointer.
  * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
  * adding a new sync object.
- * @reservied: Indicates whether @bo has been reserved for validation.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
  */
 
 struct ttm_validate_buffer {
@@ -49,6 +52,9 @@ struct ttm_validate_buffer {
 	struct ttm_buffer_object *bo;
 	void *new_sync_obj_arg;
 	bool reserved;
+	bool removed;
+	int put_count;
+	void *old_sync_obj;
 };
 
 /**
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
  * function ttm_eu_reserve_buffers
  *
  * @list: thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
  * has failed.
  */
 
-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);
 
 /**
  * function ttm_eu_fence_buffer_objects.
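
With the val_seq argument dropped (the sequence now lives in ttm_bo_device::val_seq), an execbuf-style caller reserves, validates and fences its list roughly as follows. This is a sketch only; foo_submit(), foo_validate() and the fence pointer are placeholders.

/* Sketch of the reserve -> validate -> fence flow with the new API. */
static int foo_submit(struct list_head *validate_list, void *fence)
{
	struct ttm_validate_buffer *entry;
	int ret;

	ret = ttm_eu_reserve_buffers(validate_list);
	if (ret)
		return ret;

	list_for_each_entry(entry, validate_list, head) {
		ret = foo_validate(entry->bo);	/* hypothetical per-bo step */
		if (ret) {
			ttm_eu_backoff_reservation(validate_list);
			return ret;
		}
	}

	/* Attach the fence and unreserve all buffers in one go. */
	ttm_eu_fence_buffer_objects(validate_list, fence);
	return 0;
}
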
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc07ab7..d4a62ab2ee5e 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@ struct kref {
 void kref_init(struct kref *kref);
 void kref_get(struct kref *kref);
 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+int kref_sub(struct kref *kref, unsigned int count,
+	     void (*release) (struct kref *kref));
 
 #endif /* _KREF_H_ */
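
kref_sub() is the batched counterpart of kref_put(), added so callers such as TTM's ttm_bo_list_ref_sub() can drop several references accumulated under a spinlock in one atomic operation. A minimal usage sketch, with a hypothetical foo object:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref refcount;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, refcount));
}

/* Drop 'count' references at once instead of looping over kref_put(). */
static void foo_unref_many(struct foo *f, unsigned int count)
{
	kref_sub(&f->refcount, count, foo_release);
}
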