author    | Andrea Bastoni <bastoni@cs.unc.edu> | 2011-08-27 09:43:54 -0400
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2011-08-27 10:06:11 -0400
commit    | 7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree      | 5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /include/drm/ttm
parent    | 7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent    | 02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:
* The Litmus^RT scheduling class is the topmost scheduling class
  (above stop_sched_class); a sketch of this ordering follows these notes.
* The scheduler_ipi() function (e.g., as invoked from
  smp_reschedule_interrupt()) may increase IPI latencies.
* Added a path into schedule() to quickly re-evaluate the scheduling
  decision without becoming preemptible again. This used to be a
  standard path before the removal of the BKL.
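
As a rough illustration of the first note (a sketch, not code from this merge): in v3.0 the scheduling classes form a singly linked list that pick_next_task() walks from sched_class_highest downward, so making Litmus^RT the topmost class amounts to chaining litmus_sched_class ahead of stop_sched_class. The initializer body and the exact LITMUS^RT wiring are assumed here:

```c
/*
 * Hedged sketch only -- the real LITMUS^RT patch may differ.  The .next
 * chaining, for_each_class() and sched_class_highest follow mainline
 * v3.0 (kernel/sched.c); the litmus_sched_class body is elided.
 */
static const struct sched_class litmus_sched_class = {
	.next = &stop_sched_class,	/* Litmus^RT sits above stop_sched_class */
	/* .enqueue_task, .pick_next_task, ... */
};

/* pick_next_task() scans classes highest-priority first: */
#define sched_class_highest	(&litmus_sched_class)	/* mainline: &stop_sched_class */

#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)
```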
Conflicts:
Makefile
arch/arm/kernel/calls.S
arch/arm/kernel/smp.c
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/smp.c
arch/x86/kernel/syscall_table_32.S
include/linux/hrtimer.h
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
Diffstat (limited to 'include/drm/ttm')
-rw-r--r-- | include/drm/ttm/ttm_bo_api.h       |  83
-rw-r--r-- | include/drm/ttm/ttm_bo_driver.h    | 224
-rw-r--r-- | include/drm/ttm/ttm_execbuf_util.h |  11
-rw-r--r-- | include/drm/ttm/ttm_page_alloc.h   |   8
4 files changed, 265 insertions(+), 61 deletions(-)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2040e6c4f172..62a0e4c4ceee 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -50,10 +50,10 @@ struct drm_mm_node; | |||
50 | * | 50 | * |
51 | * @fpfn: first valid page frame number to put the object | 51 | * @fpfn: first valid page frame number to put the object |
52 | * @lpfn: last valid page frame number to put the object | 52 | * @lpfn: last valid page frame number to put the object |
53 | * @num_placement: number of prefered placements | 53 | * @num_placement: number of preferred placements |
54 | * @placement: prefered placements | 54 | * @placement: preferred placements |
55 | * @num_busy_placement: number of prefered placements when need to evict buffer | 55 | * @num_busy_placement: number of preferred placements when need to evict buffer |
56 | * @busy_placement: prefered placements when need to evict buffer | 56 | * @busy_placement: preferred placements when need to evict buffer |
57 | * | 57 | * |
58 | * Structure indicating the placement you request for an object. | 58 | * Structure indicating the placement you request for an object. |
59 | */ | 59 | */ |
@@ -74,6 +74,8 @@ struct ttm_placement { | |||
74 | * @is_iomem: is this io memory ? | 74 | * @is_iomem: is this io memory ? |
75 | * @size: size in byte | 75 | * @size: size in byte |
76 | * @offset: offset from the base address | 76 | * @offset: offset from the base address |
77 | * @io_reserved_vm: The VM system has a refcount in @io_reserved_count | ||
78 | * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve | ||
77 | * | 79 | * |
78 | * Structure indicating the bus placement of an object. | 80 | * Structure indicating the bus placement of an object. |
79 | */ | 81 | */ |
@@ -83,7 +85,8 @@ struct ttm_bus_placement { | |||
83 | unsigned long size; | 85 | unsigned long size; |
84 | unsigned long offset; | 86 | unsigned long offset; |
85 | bool is_iomem; | 87 | bool is_iomem; |
86 | bool io_reserved; | 88 | bool io_reserved_vm; |
89 | uint64_t io_reserved_count; | ||
87 | }; | 90 | }; |
88 | 91 | ||
89 | 92 | ||
@@ -102,7 +105,8 @@ struct ttm_bus_placement { | |||
102 | */ | 105 | */ |
103 | 106 | ||
104 | struct ttm_mem_reg { | 107 | struct ttm_mem_reg { |
105 | struct drm_mm_node *mm_node; | 108 | void *mm_node; |
109 | unsigned long start; | ||
106 | unsigned long size; | 110 | unsigned long size; |
107 | unsigned long num_pages; | 111 | unsigned long num_pages; |
108 | uint32_t page_alignment; | 112 | uint32_t page_alignment; |
@@ -153,11 +157,10 @@ struct ttm_tt; | |||
153 | * keeps one refcount. When this refcount reaches zero, | 157 | * keeps one refcount. When this refcount reaches zero, |
154 | * the object is destroyed. | 158 | * the object is destroyed. |
155 | * @event_queue: Queue for processes waiting on buffer object status change. | 159 | * @event_queue: Queue for processes waiting on buffer object status change. |
156 | * @lock: spinlock protecting mostly synchronization members. | ||
157 | * @mem: structure describing current placement. | 160 | * @mem: structure describing current placement. |
158 | * @persistant_swap_storage: Usually the swap storage is deleted for buffers | 161 | * @persistent_swap_storage: Usually the swap storage is deleted for buffers |
159 | * pinned in physical memory. If this behaviour is not desired, this member | 162 | * pinned in physical memory. If this behaviour is not desired, this member |
160 | * holds a pointer to a persistant shmem object. | 163 | * holds a pointer to a persistent shmem object. |
161 | * @ttm: TTM structure holding system pages. | 164 | * @ttm: TTM structure holding system pages. |
162 | * @evicted: Whether the object was evicted without user-space knowing. | 165 | * @evicted: Whether the object was evicted without user-space knowing. |
163 | * @cpu_writes: For synchronization. Number of cpu writers. | 166 | * @cpu_writes: For synchronization. Number of cpu writers. |
@@ -212,14 +215,13 @@ struct ttm_buffer_object { | |||
212 | struct kref kref; | 215 | struct kref kref; |
213 | struct kref list_kref; | 216 | struct kref list_kref; |
214 | wait_queue_head_t event_queue; | 217 | wait_queue_head_t event_queue; |
215 | spinlock_t lock; | ||
216 | 218 | ||
217 | /** | 219 | /** |
218 | * Members protected by the bo::reserved lock. | 220 | * Members protected by the bo::reserved lock. |
219 | */ | 221 | */ |
220 | 222 | ||
221 | struct ttm_mem_reg mem; | 223 | struct ttm_mem_reg mem; |
222 | struct file *persistant_swap_storage; | 224 | struct file *persistent_swap_storage; |
223 | struct ttm_tt *ttm; | 225 | struct ttm_tt *ttm; |
224 | bool evicted; | 226 | bool evicted; |
225 | 227 | ||
@@ -236,6 +238,7 @@ struct ttm_buffer_object { | |||
236 | struct list_head lru; | 238 | struct list_head lru; |
237 | struct list_head ddestroy; | 239 | struct list_head ddestroy; |
238 | struct list_head swap; | 240 | struct list_head swap; |
241 | struct list_head io_reserve_lru; | ||
239 | uint32_t val_seq; | 242 | uint32_t val_seq; |
240 | bool seq_valid; | 243 | bool seq_valid; |
241 | 244 | ||
@@ -247,10 +250,10 @@ struct ttm_buffer_object { | |||
247 | atomic_t reserved; | 250 | atomic_t reserved; |
248 | 251 | ||
249 | /** | 252 | /** |
250 | * Members protected by the bo::lock | 253 | * Members protected by struct buffer_object_device::fence_lock |
251 | * In addition, setting sync_obj to anything else | 254 | * In addition, setting sync_obj to anything else |
252 | * than NULL requires bo::reserved to be held. This allows for | 255 | * than NULL requires bo::reserved to be held. This allows for |
253 | * checking NULL while reserved but not holding bo::lock. | 256 | * checking NULL while reserved but not holding the mentioned lock. |
254 | */ | 257 | */ |
255 | 258 | ||
256 | void *sync_obj_arg; | 259 | void *sync_obj_arg; |
@@ -363,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, | |||
363 | */ | 366 | */ |
364 | extern void ttm_bo_unref(struct ttm_buffer_object **bo); | 367 | extern void ttm_bo_unref(struct ttm_buffer_object **bo); |
365 | 368 | ||
369 | |||
370 | /** | ||
371 | * ttm_bo_list_ref_sub | ||
372 | * | ||
373 | * @bo: The buffer object. | ||
374 | * @count: The number of references with which to decrease @bo::list_kref; | ||
375 | * @never_free: The refcount should not reach zero with this operation. | ||
376 | * | ||
377 | * Release @count lru list references to this buffer object. | ||
378 | */ | ||
379 | extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, | ||
380 | bool never_free); | ||
381 | |||
382 | /** | ||
383 | * ttm_bo_add_to_lru | ||
384 | * | ||
385 | * @bo: The buffer object. | ||
386 | * | ||
387 | * Add this bo to the relevant mem type lru and, if it's backed by | ||
388 | * system pages (ttms) to the swap list. | ||
389 | * This function must be called with struct ttm_bo_global::lru_lock held, and | ||
390 | * is typically called immediately prior to unreserving a bo. | ||
391 | */ | ||
392 | extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); | ||
393 | |||
394 | /** | ||
395 | * ttm_bo_del_from_lru | ||
396 | * | ||
397 | * @bo: The buffer object. | ||
398 | * | ||
399 | * Remove this bo from all lru lists used to lookup and reserve an object. | ||
400 | * This function must be called with struct ttm_bo_global::lru_lock held, | ||
401 | * and is usually called just immediately after the bo has been reserved to | ||
402 | * avoid recursive reservation from lru lists. | ||
403 | */ | ||
404 | extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); | ||
405 | |||
406 | |||
366 | /** | 407 | /** |
367 | * ttm_bo_lock_delayed_workqueue | 408 | * ttm_bo_lock_delayed_workqueue |
368 | * | 409 | * |
@@ -418,9 +459,9 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); | |||
418 | * user buffer object. | 459 | * user buffer object. |
419 | * @interruptible: If needing to sleep to wait for GPU resources, | 460 | * @interruptible: If needing to sleep to wait for GPU resources, |
420 | * sleep interruptible. | 461 | * sleep interruptible. |
421 | * @persistant_swap_storage: Usually the swap storage is deleted for buffers | 462 | * @persistent_swap_storage: Usually the swap storage is deleted for buffers |
422 | * pinned in physical memory. If this behaviour is not desired, this member | 463 | * pinned in physical memory. If this behaviour is not desired, this member |
423 | * holds a pointer to a persistant shmem object. Typically, this would | 464 | * holds a pointer to a persistent shmem object. Typically, this would |
424 | * point to the shmem object backing a GEM object if TTM is used to back a | 465 | * point to the shmem object backing a GEM object if TTM is used to back a |
425 | * GEM user interface. | 466 | * GEM user interface. |
426 | * @acc_size: Accounted size for this object. | 467 | * @acc_size: Accounted size for this object. |
@@ -431,6 +472,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); | |||
431 | * together with the @destroy function, | 472 | * together with the @destroy function, |
432 | * enables driver-specific objects derived from a ttm_buffer_object. | 473 | * enables driver-specific objects derived from a ttm_buffer_object. |
433 | * On successful return, the object kref and list_kref are set to 1. | 474 | * On successful return, the object kref and list_kref are set to 1. |
475 | * If a failure occurs, the function will call the @destroy function, or | ||
476 | * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is | ||
477 | * illegal and will likely cause memory corruption. | ||
478 | * | ||
434 | * Returns | 479 | * Returns |
435 | * -ENOMEM: Out of memory. | 480 | * -ENOMEM: Out of memory. |
436 | * -EINVAL: Invalid placement flags. | 481 | * -EINVAL: Invalid placement flags. |
@@ -445,7 +490,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, | |||
445 | uint32_t page_alignment, | 490 | uint32_t page_alignment, |
446 | unsigned long buffer_start, | 491 | unsigned long buffer_start, |
447 | bool interrubtible, | 492 | bool interrubtible, |
448 | struct file *persistant_swap_storage, | 493 | struct file *persistent_swap_storage, |
449 | size_t acc_size, | 494 | size_t acc_size, |
450 | void (*destroy) (struct ttm_buffer_object *)); | 495 | void (*destroy) (struct ttm_buffer_object *)); |
451 | /** | 496 | /** |
@@ -461,9 +506,9 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, | |||
461 | * user buffer object. | 506 | * user buffer object. |
462 | * @interruptible: If needing to sleep while waiting for GPU resources, | 507 | * @interruptible: If needing to sleep while waiting for GPU resources, |
463 | * sleep interruptible. | 508 | * sleep interruptible. |
464 | * @persistant_swap_storage: Usually the swap storage is deleted for buffers | 509 | * @persistent_swap_storage: Usually the swap storage is deleted for buffers |
465 | * pinned in physical memory. If this behaviour is not desired, this member | 510 | * pinned in physical memory. If this behaviour is not desired, this member |
466 | * holds a pointer to a persistant shmem object. Typically, this would | 511 | * holds a pointer to a persistent shmem object. Typically, this would |
467 | * point to the shmem object backing a GEM object if TTM is used to back a | 512 | * point to the shmem object backing a GEM object if TTM is used to back a |
468 | * GEM user interface. | 513 | * GEM user interface. |
469 | * @p_bo: On successful completion *p_bo points to the created object. | 514 | * @p_bo: On successful completion *p_bo points to the created object. |
@@ -483,7 +528,7 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, | |||
483 | uint32_t page_alignment, | 528 | uint32_t page_alignment, |
484 | unsigned long buffer_start, | 529 | unsigned long buffer_start, |
485 | bool interruptible, | 530 | bool interruptible, |
486 | struct file *persistant_swap_storage, | 531 | struct file *persistent_swap_storage, |
487 | struct ttm_buffer_object **p_bo); | 532 | struct ttm_buffer_object **p_bo); |
488 | 533 | ||
489 | /** | 534 | /** |
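
The ttm_bo_api.h hunks above spell out the locking contract for the newly exported lru helpers: ttm_bo_add_to_lru() and ttm_bo_del_from_lru() must run under ttm_bo_global::lru_lock, typically right after reserving or right before unreserving a bo. Below is a hedged sketch of the reserve side, essentially what ttm_bo_reserve() does internally; the my_* wrapper and the bo->glob back-pointer access are assumptions, not part of this diff.

```c
/*
 * Illustrative sketch only -- mirrors the documented contract rather
 * than quoting this commit.  ttm_bo_reserve_locked() is declared in
 * ttm_bo_driver.h (also changed by this commit); bo->glob is the bo's
 * ttm_bo_global back-pointer in this kernel era.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static int my_reserve_for_validation(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, true, true, false, 0);
	if (ret == 0)
		/* Pull the bo off the lru lists right away so it cannot be
		 * picked for eviction while we hold the reservation. */
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/* Drop the lru list references outside the spinlock. */
	ttm_bo_list_ref_sub(bo, put_count, true);
	return ret;
}
```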
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index b87504235f18..09af2d746d1c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -50,13 +50,15 @@ struct ttm_backend_func { | |||
50 | * @pages: Array of pointers to ttm pages. | 50 | * @pages: Array of pointers to ttm pages. |
51 | * @dummy_read_page: Page to be used instead of NULL pages in the | 51 | * @dummy_read_page: Page to be used instead of NULL pages in the |
52 | * array @pages. | 52 | * array @pages. |
53 | * @dma_addrs: Array of DMA (bus) address of the ttm pages. | ||
53 | * | 54 | * |
54 | * Populate the backend with ttm pages. Depending on the backend, | 55 | * Populate the backend with ttm pages. Depending on the backend, |
55 | * it may or may not copy the @pages array. | 56 | * it may or may not copy the @pages array. |
56 | */ | 57 | */ |
57 | int (*populate) (struct ttm_backend *backend, | 58 | int (*populate) (struct ttm_backend *backend, |
58 | unsigned long num_pages, struct page **pages, | 59 | unsigned long num_pages, struct page **pages, |
59 | struct page *dummy_read_page); | 60 | struct page *dummy_read_page, |
61 | dma_addr_t *dma_addrs); | ||
60 | /** | 62 | /** |
61 | * struct ttm_backend_func member clear | 63 | * struct ttm_backend_func member clear |
62 | * | 64 | * |
@@ -120,7 +122,7 @@ struct ttm_backend { | |||
120 | #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) | 122 | #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) |
121 | #define TTM_PAGE_FLAG_WRITE (1 << 3) | 123 | #define TTM_PAGE_FLAG_WRITE (1 << 3) |
122 | #define TTM_PAGE_FLAG_SWAPPED (1 << 4) | 124 | #define TTM_PAGE_FLAG_SWAPPED (1 << 4) |
123 | #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) | 125 | #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5) |
124 | #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) | 126 | #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) |
125 | #define TTM_PAGE_FLAG_DMA32 (1 << 7) | 127 | #define TTM_PAGE_FLAG_DMA32 (1 << 7) |
126 | 128 | ||
@@ -149,6 +151,7 @@ enum ttm_caching_state { | |||
149 | * @swap_storage: Pointer to shmem struct file for swap storage. | 151 | * @swap_storage: Pointer to shmem struct file for swap storage. |
150 | * @caching_state: The current caching state of the pages. | 152 | * @caching_state: The current caching state of the pages. |
151 | * @state: The current binding state of the pages. | 153 | * @state: The current binding state of the pages. |
154 | * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32) | ||
152 | * | 155 | * |
153 | * This is a structure holding the pages, caching- and aperture binding | 156 | * This is a structure holding the pages, caching- and aperture binding |
154 | * status for a buffer object that isn't backed by fixed (VRAM / AGP) | 157 | * status for a buffer object that isn't backed by fixed (VRAM / AGP) |
@@ -173,12 +176,97 @@ struct ttm_tt { | |||
173 | tt_unbound, | 176 | tt_unbound, |
174 | tt_unpopulated, | 177 | tt_unpopulated, |
175 | } state; | 178 | } state; |
179 | dma_addr_t *dma_address; | ||
176 | }; | 180 | }; |
177 | 181 | ||
178 | #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ | 182 | #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ |
179 | #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ | 183 | #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ |
180 | #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ | 184 | #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ |
181 | 185 | ||
186 | struct ttm_mem_type_manager; | ||
187 | |||
188 | struct ttm_mem_type_manager_func { | ||
189 | /** | ||
190 | * struct ttm_mem_type_manager member init | ||
191 | * | ||
192 | * @man: Pointer to a memory type manager. | ||
193 | * @p_size: Implementation dependent, but typically the size of the | ||
194 | * range to be managed in pages. | ||
195 | * | ||
196 | * Called to initialize a private range manager. The function is | ||
197 | * expected to initialize the man::priv member. | ||
198 | * Returns 0 on success, negative error code on failure. | ||
199 | */ | ||
200 | int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); | ||
201 | |||
202 | /** | ||
203 | * struct ttm_mem_type_manager member takedown | ||
204 | * | ||
205 | * @man: Pointer to a memory type manager. | ||
206 | * | ||
207 | * Called to undo the setup done in init. All allocated resources | ||
208 | * should be freed. | ||
209 | */ | ||
210 | int (*takedown)(struct ttm_mem_type_manager *man); | ||
211 | |||
212 | /** | ||
213 | * struct ttm_mem_type_manager member get_node | ||
214 | * | ||
215 | * @man: Pointer to a memory type manager. | ||
216 | * @bo: Pointer to the buffer object we're allocating space for. | ||
217 | * @placement: Placement details. | ||
218 | * @mem: Pointer to a struct ttm_mem_reg to be filled in. | ||
219 | * | ||
220 | * This function should allocate space in the memory type managed | ||
221 | * by @man. Placement details if | ||
222 | * applicable are given by @placement. If successful, | ||
223 | * @mem::mm_node should be set to a non-null value, and | ||
224 | * @mem::start should be set to a value identifying the beginning | ||
225 | * of the range allocated, and the function should return zero. | ||
226 | * If the memory region accommodate the buffer object, @mem::mm_node | ||
227 | * should be set to NULL, and the function should return 0. | ||
228 | * If a system error occurred, preventing the request to be fulfilled, | ||
229 | * the function should return a negative error code. | ||
230 | * | ||
231 | * Note that @mem::mm_node will only be dereferenced by | ||
232 | * struct ttm_mem_type_manager functions and optionally by the driver, | ||
233 | * which has knowledge of the underlying type. | ||
234 | * | ||
235 | * This function may not be called from within atomic context, so | ||
236 | * an implementation can and must use either a mutex or a spinlock to | ||
237 | * protect any data structures managing the space. | ||
238 | */ | ||
239 | int (*get_node)(struct ttm_mem_type_manager *man, | ||
240 | struct ttm_buffer_object *bo, | ||
241 | struct ttm_placement *placement, | ||
242 | struct ttm_mem_reg *mem); | ||
243 | |||
244 | /** | ||
245 | * struct ttm_mem_type_manager member put_node | ||
246 | * | ||
247 | * @man: Pointer to a memory type manager. | ||
248 | * @mem: Pointer to a struct ttm_mem_reg to be filled in. | ||
249 | * | ||
250 | * This function frees memory type resources previously allocated | ||
251 | * and that are identified by @mem::mm_node and @mem::start. May not | ||
252 | * be called from within atomic context. | ||
253 | */ | ||
254 | void (*put_node)(struct ttm_mem_type_manager *man, | ||
255 | struct ttm_mem_reg *mem); | ||
256 | |||
257 | /** | ||
258 | * struct ttm_mem_type_manager member debug | ||
259 | * | ||
260 | * @man: Pointer to a memory type manager. | ||
261 | * @prefix: Prefix to be used in printout to identify the caller. | ||
262 | * | ||
263 | * This function is called to print out the state of the memory | ||
264 | * type manager to aid debugging of out-of-memory conditions. | ||
265 | * It may not be called from within atomic context. | ||
266 | */ | ||
267 | void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); | ||
268 | }; | ||
269 | |||
182 | /** | 270 | /** |
183 | * struct ttm_mem_type_manager | 271 | * struct ttm_mem_type_manager |
184 | * | 272 | * |
@@ -193,17 +281,24 @@ struct ttm_tt { | |||
193 | * as defined in ttm_placement_common.h | 281 | * as defined in ttm_placement_common.h |
194 | * @default_caching: The default caching policy used for a buffer object | 282 | * @default_caching: The default caching policy used for a buffer object |
195 | * placed in this memory type if the user doesn't provide one. | 283 | * placed in this memory type if the user doesn't provide one. |
196 | * @manager: The range manager used for this memory type. FIXME: If the aperture | 284 | * @func: structure pointer implementing the range manager. See above |
197 | * has a page size different from the underlying system, the granularity | 285 | * @priv: Driver private closure for @func. |
198 | * of this manager should take care of this. But the range allocating code | 286 | * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures |
199 | * in ttm_bo.c needs to be modified for this. | 287 | * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions |
288 | * reserved by the TTM vm system. | ||
289 | * @io_reserve_lru: Optional lru list for unreserving io mem regions. | ||
290 | * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain | ||
291 | * static information. bdev::driver::io_mem_free is never used. | ||
200 | * @lru: The lru list for this memory type. | 292 | * @lru: The lru list for this memory type. |
201 | * | 293 | * |
202 | * This structure is used to identify and manage memory types for a device. | 294 | * This structure is used to identify and manage memory types for a device. |
203 | * It's set up by the ttm_bo_driver::init_mem_type method. | 295 | * It's set up by the ttm_bo_driver::init_mem_type method. |
204 | */ | 296 | */ |
205 | 297 | ||
298 | |||
299 | |||
206 | struct ttm_mem_type_manager { | 300 | struct ttm_mem_type_manager { |
301 | struct ttm_bo_device *bdev; | ||
207 | 302 | ||
208 | /* | 303 | /* |
209 | * No protection. Constant from start. | 304 | * No protection. Constant from start. |
@@ -216,14 +311,22 @@ struct ttm_mem_type_manager { | |||
216 | uint64_t size; | 311 | uint64_t size; |
217 | uint32_t available_caching; | 312 | uint32_t available_caching; |
218 | uint32_t default_caching; | 313 | uint32_t default_caching; |
314 | const struct ttm_mem_type_manager_func *func; | ||
315 | void *priv; | ||
316 | struct mutex io_reserve_mutex; | ||
317 | bool use_io_reserve_lru; | ||
318 | bool io_reserve_fastpath; | ||
319 | |||
320 | /* | ||
321 | * Protected by @io_reserve_mutex: | ||
322 | */ | ||
323 | |||
324 | struct list_head io_reserve_lru; | ||
219 | 325 | ||
220 | /* | 326 | /* |
221 | * Protected by the bdev->lru_lock. | 327 | * Protected by the global->lru_lock. |
222 | * TODO: Consider one lru_lock per ttm_mem_type_manager. | ||
223 | * Plays ill with list removal, though. | ||
224 | */ | 328 | */ |
225 | 329 | ||
226 | struct drm_mm manager; | ||
227 | struct list_head lru; | 330 | struct list_head lru; |
228 | }; | 331 | }; |
229 | 332 | ||
@@ -426,9 +529,12 @@ struct ttm_bo_global { | |||
426 | * | 529 | * |
427 | * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. | 530 | * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. |
428 | * @man: An array of mem_type_managers. | 531 | * @man: An array of mem_type_managers. |
532 | * @fence_lock: Protects the synchronizing members on *all* bos belonging | ||
533 | * to this device. | ||
429 | * @addr_space_mm: Range manager for the device address space. | 534 | * @addr_space_mm: Range manager for the device address space. |
430 | * lru_lock: Spinlock that protects the buffer+device lru lists and | 535 | * lru_lock: Spinlock that protects the buffer+device lru lists and |
431 | * ddestroy lists. | 536 | * ddestroy lists. |
537 | * @val_seq: Current validation sequence. | ||
432 | * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. | 538 | * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. |
433 | * If a GPU lockup has been detected, this is forced to 0. | 539 | * If a GPU lockup has been detected, this is forced to 0. |
434 | * @dev_mapping: A pointer to the struct address_space representing the | 540 | * @dev_mapping: A pointer to the struct address_space representing the |
@@ -447,6 +553,7 @@ struct ttm_bo_device { | |||
447 | struct ttm_bo_driver *driver; | 553 | struct ttm_bo_driver *driver; |
448 | rwlock_t vm_lock; | 554 | rwlock_t vm_lock; |
449 | struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; | 555 | struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
556 | spinlock_t fence_lock; | ||
450 | /* | 557 | /* |
451 | * Protected by the vm lock. | 558 | * Protected by the vm lock. |
452 | */ | 559 | */ |
@@ -457,6 +564,7 @@ struct ttm_bo_device { | |||
457 | * Protected by the global:lru lock. | 564 | * Protected by the global:lru lock. |
458 | */ | 565 | */ |
459 | struct list_head ddestroy; | 566 | struct list_head ddestroy; |
567 | uint32_t val_seq; | ||
460 | 568 | ||
461 | /* | 569 | /* |
462 | * Protected by load / firstopen / lastclose /unload sync. | 570 | * Protected by load / firstopen / lastclose /unload sync. |
@@ -606,7 +714,7 @@ extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages); | |||
606 | */ | 714 | */ |
607 | extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); | 715 | extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); |
608 | extern int ttm_tt_swapout(struct ttm_tt *ttm, | 716 | extern int ttm_tt_swapout(struct ttm_tt *ttm, |
609 | struct file *persistant_swap_storage); | 717 | struct file *persistent_swap_storage); |
610 | 718 | ||
611 | /* | 719 | /* |
612 | * ttm_bo.c | 720 | * ttm_bo.c |
@@ -649,6 +757,12 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
649 | struct ttm_mem_reg *mem, | 757 | struct ttm_mem_reg *mem, |
650 | bool interruptible, | 758 | bool interruptible, |
651 | bool no_wait_reserve, bool no_wait_gpu); | 759 | bool no_wait_reserve, bool no_wait_gpu); |
760 | |||
761 | extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, | ||
762 | struct ttm_mem_reg *mem); | ||
763 | extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, | ||
764 | struct ttm_mem_reg *mem); | ||
765 | |||
652 | /** | 766 | /** |
653 | * ttm_bo_wait_for_cpu | 767 | * ttm_bo_wait_for_cpu |
654 | * | 768 | * |
@@ -663,31 +777,6 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
663 | 777 | ||
664 | extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); | 778 | extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); |
665 | 779 | ||
666 | /** | ||
667 | * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory. | ||
668 | * | ||
669 | * @bo Pointer to a struct ttm_buffer_object. | ||
670 | * @bus_base On return the base of the PCI region | ||
671 | * @bus_offset On return the byte offset into the PCI region | ||
672 | * @bus_size On return the byte size of the buffer object or zero if | ||
673 | * the buffer object memory is not accessible through a PCI region. | ||
674 | * | ||
675 | * Returns: | ||
676 | * -EINVAL if the buffer object is currently not mappable. | ||
677 | * 0 otherwise. | ||
678 | */ | ||
679 | |||
680 | extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, | ||
681 | struct ttm_mem_reg *mem, | ||
682 | unsigned long *bus_base, | ||
683 | unsigned long *bus_offset, | ||
684 | unsigned long *bus_size); | ||
685 | |||
686 | extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev, | ||
687 | struct ttm_mem_reg *mem); | ||
688 | extern void ttm_mem_io_free(struct ttm_bo_device *bdev, | ||
689 | struct ttm_mem_reg *mem); | ||
690 | |||
691 | extern void ttm_bo_global_release(struct drm_global_reference *ref); | 780 | extern void ttm_bo_global_release(struct drm_global_reference *ref); |
692 | extern int ttm_bo_global_init(struct drm_global_reference *ref); | 781 | extern int ttm_bo_global_init(struct drm_global_reference *ref); |
693 | 782 | ||
@@ -720,6 +809,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
720 | extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | 809 | extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); |
721 | 810 | ||
722 | /** | 811 | /** |
812 | * ttm_bo_unmap_virtual | ||
813 | * | ||
814 | * @bo: tear down the virtual mappings for this BO | ||
815 | * | ||
816 | * The caller must take ttm_mem_io_lock before calling this function. | ||
817 | */ | ||
818 | extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); | ||
819 | |||
820 | extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); | ||
821 | extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); | ||
822 | extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, | ||
823 | bool interruptible); | ||
824 | extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); | ||
825 | |||
826 | |||
827 | /** | ||
723 | * ttm_bo_reserve: | 828 | * ttm_bo_reserve: |
724 | * | 829 | * |
725 | * @bo: A pointer to a struct ttm_buffer_object. | 830 | * @bo: A pointer to a struct ttm_buffer_object. |
@@ -736,7 +841,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | |||
736 | * different order, either by will or as a result of a buffer being evicted | 841 | * different order, either by will or as a result of a buffer being evicted |
737 | * to make room for a buffer already reserved. (Buffers are reserved before | 842 | * to make room for a buffer already reserved. (Buffers are reserved before |
738 | * they are evicted). The following algorithm prevents such deadlocks from | 843 | * they are evicted). The following algorithm prevents such deadlocks from |
739 | * occuring: | 844 | * occurring: |
740 | * 1) Buffers are reserved with the lru spinlock held. Upon successful | 845 | * 1) Buffers are reserved with the lru spinlock held. Upon successful |
741 | * reservation they are removed from the lru list. This stops a reserved buffer | 846 | * reservation they are removed from the lru list. This stops a reserved buffer |
742 | * from being evicted. However the lru spinlock is released between the time | 847 | * from being evicted. However the lru spinlock is released between the time |
@@ -769,11 +874,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | |||
769 | * try again. (only if use_sequence == 1). | 874 | * try again. (only if use_sequence == 1). |
770 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | 875 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
771 | * a signal. Release all buffer reservations and return to user-space. | 876 | * a signal. Release all buffer reservations and return to user-space. |
877 | * -EBUSY: The function needed to sleep, but @no_wait was true | ||
878 | * -EDEADLK: Bo already reserved using @sequence. This error code will only | ||
879 | * be returned if @use_sequence is set to true. | ||
772 | */ | 880 | */ |
773 | extern int ttm_bo_reserve(struct ttm_buffer_object *bo, | 881 | extern int ttm_bo_reserve(struct ttm_buffer_object *bo, |
774 | bool interruptible, | 882 | bool interruptible, |
775 | bool no_wait, bool use_sequence, uint32_t sequence); | 883 | bool no_wait, bool use_sequence, uint32_t sequence); |
776 | 884 | ||
885 | |||
886 | /** | ||
887 | * ttm_bo_reserve_locked: | ||
888 | * | ||
889 | * @bo: A pointer to a struct ttm_buffer_object. | ||
890 | * @interruptible: Sleep interruptible if waiting. | ||
891 | * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. | ||
892 | * @use_sequence: If @bo is already reserved, Only sleep waiting for | ||
893 | * it to become unreserved if @sequence < (@bo)->sequence. | ||
894 | * | ||
895 | * Must be called with struct ttm_bo_global::lru_lock held, | ||
896 | * and will not remove reserved buffers from the lru lists. | ||
897 | * The function may release the LRU spinlock if it needs to sleep. | ||
898 | * Otherwise identical to ttm_bo_reserve. | ||
899 | * | ||
900 | * Returns: | ||
901 | * -EAGAIN: The reservation may cause a deadlock. | ||
902 | * Release all buffer reservations, wait for @bo to become unreserved and | ||
903 | * try again. (only if use_sequence == 1). | ||
904 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | ||
905 | * a signal. Release all buffer reservations and return to user-space. | ||
906 | * -EBUSY: The function needed to sleep, but @no_wait was true | ||
907 | * -EDEADLK: Bo already reserved using @sequence. This error code will only | ||
908 | * be returned if @use_sequence is set to true. | ||
909 | */ | ||
910 | extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | ||
911 | bool interruptible, | ||
912 | bool no_wait, bool use_sequence, | ||
913 | uint32_t sequence); | ||
914 | |||
777 | /** | 915 | /** |
778 | * ttm_bo_unreserve | 916 | * ttm_bo_unreserve |
779 | * | 917 | * |
@@ -784,6 +922,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, | |||
784 | extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); | 922 | extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); |
785 | 923 | ||
786 | /** | 924 | /** |
925 | * ttm_bo_unreserve_locked | ||
926 | * | ||
927 | * @bo: A pointer to a struct ttm_buffer_object. | ||
928 | * | ||
929 | * Unreserve a previous reservation of @bo. | ||
930 | * Needs to be called with struct ttm_bo_global::lru_lock held. | ||
931 | */ | ||
932 | extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); | ||
933 | |||
934 | /** | ||
787 | * ttm_bo_wait_unreserved | 935 | * ttm_bo_wait_unreserved |
788 | * | 936 | * |
789 | * @bo: A pointer to a struct ttm_buffer_object. | 937 | * @bo: A pointer to a struct ttm_buffer_object. |
@@ -891,6 +1039,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
891 | */ | 1039 | */ |
892 | extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); | 1040 | extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); |
893 | 1041 | ||
1042 | extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; | ||
1043 | |||
894 | #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) | 1044 | #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) |
895 | #define TTM_HAS_AGP | 1045 | #define TTM_HAS_AGP |
896 | #include <linux/agp_backend.h> | 1046 | #include <linux/agp_backend.h> |
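
The ttm_bo_driver.h hunks above replace the hard-wired drm_mm member of struct ttm_mem_type_manager with a pluggable struct ttm_mem_type_manager_func and export a generic implementation as ttm_bo_manager_func. A hedged sketch of how a driver's init_mem_type() hook might adopt it follows; everything named my_* (and the choice of VRAM flags) is illustrative, not from this patch.

```c
/*
 * Hedged sketch: wiring the new range-manager interface into a driver's
 * init_mem_type() hook.  ttm_bo_manager_func is the generic drm_mm-backed
 * manager exported by this commit; the placement flags chosen here are
 * merely plausible examples.
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static int my_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			    struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;   /* generic range manager */
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
```

ttm_bo_init_mm() then invokes man->func->init() with the managed size in pages, and get_node()/put_node() take over the allocation work that the removed drm_mm member used to do.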
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..26cc7f9ffa41 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@ | |||
41 | * @bo: refcounted buffer object pointer. | 41 | * @bo: refcounted buffer object pointer. |
42 | * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once | 42 | * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once |
43 | * adding a new sync object. | 43 | * adding a new sync object. |
44 | * @reservied: Indicates whether @bo has been reserved for validation. | 44 | * @reserved: Indicates whether @bo has been reserved for validation. |
45 | * @removed: Indicates whether @bo has been removed from lru lists. | ||
46 | * @put_count: Number of outstanding references on bo::list_kref. | ||
47 | * @old_sync_obj: Pointer to a sync object about to be unreferenced | ||
45 | */ | 48 | */ |
46 | 49 | ||
47 | struct ttm_validate_buffer { | 50 | struct ttm_validate_buffer { |
@@ -49,6 +52,9 @@ struct ttm_validate_buffer { | |||
49 | struct ttm_buffer_object *bo; | 52 | struct ttm_buffer_object *bo; |
50 | void *new_sync_obj_arg; | 53 | void *new_sync_obj_arg; |
51 | bool reserved; | 54 | bool reserved; |
55 | bool removed; | ||
56 | int put_count; | ||
57 | void *old_sync_obj; | ||
52 | }; | 58 | }; |
53 | 59 | ||
54 | /** | 60 | /** |
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); | |||
66 | * function ttm_eu_reserve_buffers | 72 | * function ttm_eu_reserve_buffers |
67 | * | 73 | * |
68 | * @list: thread private list of ttm_validate_buffer structs. | 74 | * @list: thread private list of ttm_validate_buffer structs. |
69 | * @val_seq: A unique sequence number. | ||
70 | * | 75 | * |
71 | * Tries to reserve bos pointed to by the list entries for validation. | 76 | * Tries to reserve bos pointed to by the list entries for validation. |
72 | * If the function returns 0, all buffers are marked as "unfenced", | 77 | * If the function returns 0, all buffers are marked as "unfenced", |
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); | |||
88 | * has failed. | 93 | * has failed. |
89 | */ | 94 | */ |
90 | 95 | ||
91 | extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); | 96 | extern int ttm_eu_reserve_buffers(struct list_head *list); |
92 | 97 | ||
93 | /** | 98 | /** |
94 | * function ttm_eu_fence_buffer_objects. | 99 | * function ttm_eu_fence_buffer_objects. |
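
The ttm_execbuf_util.h hunks above drop the caller-supplied val_seq argument; TTM now tracks the validation sequence internally (see the new ttm_bo_device::val_seq field). A hedged sketch of a command-submission path using the updated interface; my_submit, my_validate_and_emit and the fence object handling are assumptions for illustration.

```c
/*
 * Hedged sketch of an execbuf-style submission using the updated
 * ttm_execbuf_util.h interface above.  my_validate_and_emit() stands in
 * for the driver-specific placement validation and command emission.
 */
#include <drm/ttm/ttm_execbuf_util.h>

static int my_validate_and_emit(struct list_head *list);	/* driver specific, assumed */

static int my_submit(struct list_head *validate_list, void *fence_obj)
{
	int ret;

	/* Reserve every bo on the thread-private validation list. */
	ret = ttm_eu_reserve_buffers(validate_list);
	if (ret)
		return ret;

	ret = my_validate_and_emit(validate_list);
	if (ret) {
		/* Undo all reservations if anything went wrong. */
		ttm_eu_backoff_reservation(validate_list);
		return ret;
	}

	/* Attach the fence to every bo and drop the reservations. */
	ttm_eu_fence_buffer_objects(validate_list, fence_obj);
	return 0;
}
```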
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 116821448c38..8062890f725e 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -36,11 +36,13 @@ | |||
36 | * @flags: ttm flags for page allocation. | 36 | * @flags: ttm flags for page allocation. |
37 | * @cstate: ttm caching state for the page. | 37 | * @cstate: ttm caching state for the page. |
38 | * @count: number of pages to allocate. | 38 | * @count: number of pages to allocate. |
39 | * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set). | ||
39 | */ | 40 | */ |
40 | int ttm_get_pages(struct list_head *pages, | 41 | int ttm_get_pages(struct list_head *pages, |
41 | int flags, | 42 | int flags, |
42 | enum ttm_caching_state cstate, | 43 | enum ttm_caching_state cstate, |
43 | unsigned count); | 44 | unsigned count, |
45 | dma_addr_t *dma_address); | ||
44 | /** | 46 | /** |
45 | * Put linked list of pages to pool. | 47 | * Put linked list of pages to pool. |
46 | * | 48 | * |
@@ -49,11 +51,13 @@ int ttm_get_pages(struct list_head *pages, | |||
49 | * count. | 51 | * count. |
50 | * @flags: ttm flags for page allocation. | 52 | * @flags: ttm flags for page allocation. |
51 | * @cstate: ttm caching state. | 53 | * @cstate: ttm caching state. |
54 | * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set). | ||
52 | */ | 55 | */ |
53 | void ttm_put_pages(struct list_head *pages, | 56 | void ttm_put_pages(struct list_head *pages, |
54 | unsigned page_count, | 57 | unsigned page_count, |
55 | int flags, | 58 | int flags, |
56 | enum ttm_caching_state cstate); | 59 | enum ttm_caching_state cstate, |
60 | dma_addr_t *dma_address); | ||
57 | /** | 61 | /** |
58 | * Initialize pool allocator. | 62 | * Initialize pool allocator. |
59 | */ | 63 | */ |
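
The ttm_page_alloc.h hunks above add a dma_address array to ttm_get_pages()/ttm_put_pages(), filled in when TTM_PAGE_FLAG_DMA32 is set. A hedged sketch of a caller (typically a ttm_backend populate path); the my_* wrapper and the array allocation strategy are assumptions, only the ttm_get_pages() signature comes from this patch.

```c
/*
 * Hedged sketch only: allocate ttm pages and capture their DMA (bus)
 * addresses using the new ttm_get_pages() signature declared above.
 */
#include <linux/slab.h>
#include <drm/ttm/ttm_page_alloc.h>

static int my_alloc_ttm_pages(struct list_head *pages, unsigned count,
			      int flags, enum ttm_caching_state cstate,
			      dma_addr_t **dma_out)
{
	dma_addr_t *dma = kcalloc(count, sizeof(*dma), GFP_KERNEL);
	int ret;

	if (!dma)
		return -ENOMEM;

	/* One dma_addr_t slot per requested page; only meaningful when
	 * TTM_PAGE_FLAG_DMA32 is part of @flags. */
	ret = ttm_get_pages(pages, flags, cstate, count, dma);
	if (ret) {
		kfree(dma);
		return ret;
	}

	*dma_out = dma;
	return 0;
}
```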